New upstream version 16.11.5

author    Luca Boccassi <luca.boccassi@gmail.com>  Mon, 23 Apr 2018 13:16:57 +0000 (14:16 +0100)
committer Luca Boccassi <luca.boccassi@gmail.com>  Mon, 23 Apr 2018 13:17:34 +0000 (14:17 +0100)
Change-Id: I47171042629a57c6958d50251351e668ca5f3d8b
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
126 files changed:
MAINTAINERS
app/Makefile
app/test-pmd/cmdline.c
app/test-pmd/config.c
app/test-pmd/txonly.c
app/test/test.c
app/test/test_cryptodev.c
app/test/test_memzone.c
app/test/test_pmd_perf.c
app/test/test_reorder.c
app/test/test_ring_perf.c
app/test/test_table.c
app/test/test_table_acl.c
app/test/test_timer_perf.c
buildtools/pmdinfogen/pmdinfogen.c
config/common_base
config/common_linuxapp
doc/guides/cryptodevs/aesni_mb.rst
doc/guides/nics/features/i40e.ini
doc/guides/nics/features/i40e_vec.ini
doc/guides/nics/i40e.rst
doc/guides/rel_notes/release_16_11.rst
doc/guides/sample_app_ug/keep_alive.rst
drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
drivers/crypto/qat/qat_crypto.c
drivers/net/af_packet/rte_eth_af_packet.c
drivers/net/bnxt/bnxt.h
drivers/net/bnxt/bnxt_ethdev.c
drivers/net/bnxt/bnxt_hwrm.c
drivers/net/bnxt/bnxt_hwrm.h
drivers/net/bnxt/bnxt_ring.c
drivers/net/bnxt/bnxt_ring.h
drivers/net/bnxt/bnxt_rxr.c
drivers/net/bnxt/bnxt_txr.c
drivers/net/bonding/rte_eth_bond_8023ad.c
drivers/net/bonding/rte_eth_bond_api.c
drivers/net/bonding/rte_eth_bond_pmd.c
drivers/net/e1000/em_ethdev.c
drivers/net/e1000/igb_ethdev.c
drivers/net/ena/ena_ethdev.c
drivers/net/enic/enic.h
drivers/net/enic/enic_ethdev.c
drivers/net/enic/enic_main.c
drivers/net/fm10k/fm10k_ethdev.c
drivers/net/i40e/Makefile
drivers/net/i40e/base/i40e_adminq.c
drivers/net/i40e/base/i40e_common.c
drivers/net/i40e/base/i40e_nvm.c
drivers/net/i40e/base/i40e_type.h
drivers/net/i40e/i40e_ethdev.c
drivers/net/i40e/i40e_ethdev.h
drivers/net/i40e/i40e_ethdev_vf.c
drivers/net/i40e/i40e_fdir.c
drivers/net/i40e/i40e_rxtx.c
drivers/net/i40e/i40e_rxtx_vec_altivec.c [new file with mode: 0644]
drivers/net/ixgbe/base/ixgbe_82599.c
drivers/net/ixgbe/base/ixgbe_api.c
drivers/net/ixgbe/base/ixgbe_common.c
drivers/net/ixgbe/base/ixgbe_mbx.c
drivers/net/ixgbe/base/ixgbe_type.h
drivers/net/ixgbe/ixgbe_ethdev.c
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_ethdev.c
drivers/net/nfp/nfp_net.c
drivers/net/null/rte_eth_null.c
drivers/net/pcap/rte_eth_pcap.c
drivers/net/qede/base/ecore_dcbx.c
drivers/net/qede/base/ecore_vf.c
drivers/net/qede/base/ecore_vfpf_if.h
drivers/net/qede/qede_ethdev.c
drivers/net/qede/qede_rxtx.c
drivers/net/qede/qede_rxtx.h
drivers/net/ring/rte_eth_ring.c
drivers/net/szedata2/rte_eth_szedata2.c
drivers/net/thunderx/nicvf_ethdev.c
drivers/net/thunderx/nicvf_rxtx.c
drivers/net/vhost/rte_eth_vhost.c
drivers/net/virtio/virtio_ethdev.c
drivers/net/virtio/virtio_rxtx.c
drivers/net/virtio/virtio_rxtx.h
drivers/net/virtio/virtio_rxtx_simple.c
drivers/net/virtio/virtio_rxtx_simple.h
drivers/net/virtio/virtio_user/virtio_user_dev.c
drivers/net/virtio/virtqueue.c
drivers/net/virtio/virtqueue.h
drivers/net/vmxnet3/vmxnet3_ethdev.c
drivers/net/xenvirt/virtqueue.h
examples/bond/main.c
examples/exception_path/main.c
examples/ip_pipeline/init.c
examples/ipsec-secgw/ipsec-secgw.c
examples/ipsec-secgw/sa.c
examples/l3fwd-power/main.c
examples/vhost/main.c
lib/librte_eal/bsdapp/contigmem/contigmem.c
lib/librte_eal/bsdapp/eal/eal_memory.c
lib/librte_eal/common/eal_common_memzone.c
lib/librte_eal/common/eal_common_pci_uio.c
lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
lib/librte_eal/common/include/arch/x86/rte_atomic.h
lib/librte_eal/common/include/rte_debug.h
lib/librte_eal/common/include/rte_version.h
lib/librte_eal/common/malloc_elem.c
lib/librte_eal/common/malloc_heap.c
lib/librte_eal/common/malloc_heap.h
lib/librte_eal/common/rte_keepalive.c
lib/librte_eal/linuxapp/eal/eal_pci.c
lib/librte_eal/linuxapp/eal/eal_vfio.c
lib/librte_eal/linuxapp/eal/eal_vfio.h
lib/librte_eal/linuxapp/igb_uio/compat.h
lib/librte_eal/linuxapp/igb_uio/igb_uio.c
lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c
lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
lib/librte_ether/rte_ethdev.c
lib/librte_ether/rte_ethdev.h
lib/librte_lpm/rte_lpm.c
lib/librte_mbuf/rte_mbuf.h
lib/librte_pdump/rte_pdump.c
lib/librte_vhost/socket.c
lib/librte_vhost/vhost.c
lib/librte_vhost/vhost.h
lib/librte_vhost/vhost_user.c
lib/librte_vhost/virtio_net.c
mk/internal/rte.extvars.mk
pkg/dpdk.spec
tools/dpdk-devbind.py

diff --git a/MAINTAINERS b/MAINTAINERS
index 065397b..a380b5d 100644
@@ -166,6 +166,7 @@ IBM POWER
 M: Chao Zhu <chaozhu@linux.vnet.ibm.com>
 F: lib/librte_eal/common/arch/ppc_64/
 F: lib/librte_eal/common/include/arch/ppc_64/
+F: drivers/net/i40e/i40e_rxtx_vec_altivec.c
 
 Intel x86
 M: Bruce Richardson <bruce.richardson@intel.com>
diff --git a/app/Makefile b/app/Makefile
index 30ec292..62d5d02 100644
@@ -36,7 +36,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_ACL) += test-acl
 DIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += test-pipeline
 DIRS-$(CONFIG_RTE_TEST_PMD) += test-pmd
 DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_test
-DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += proc_info
+DIRS-$(CONFIG_RTE_PROC_INFO) += proc_info
 DIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += pdump
 
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index f4ff318..7171bd6 100644
@@ -8387,11 +8387,11 @@ struct cmd_flow_director_result {
        uint16_t port_dst;
        cmdline_fixed_string_t verify_tag;
        uint32_t verify_tag_value;
-       cmdline_ipaddr_t tos;
+       cmdline_fixed_string_t tos;
        uint8_t tos_value;
-       cmdline_ipaddr_t proto;
+       cmdline_fixed_string_t proto;
        uint8_t proto_value;
-       cmdline_ipaddr_t ttl;
+       cmdline_fixed_string_t ttl;
        uint8_t ttl_value;
        cmdline_fixed_string_t vlan;
        uint16_t vlan_value;
@@ -8923,7 +8923,7 @@ cmdline_parse_inst_t cmd_add_del_sctp_flow_director = {
                (void *)&cmd_flow_director_flow_type,
                (void *)&cmd_flow_director_src,
                (void *)&cmd_flow_director_ip_src,
-               (void *)&cmd_flow_director_port_dst,
+               (void *)&cmd_flow_director_port_src,
                (void *)&cmd_flow_director_dst,
                (void *)&cmd_flow_director_ip_dst,
                (void *)&cmd_flow_director_port_dst,
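
The tos, proto and ttl fields fixed above are parsed as plain string tokens, and librte_cmdline writes each parsed token directly into the matching field of the result struct, so a string token needs a cmdline_fixed_string_t-sized field. A standalone sketch of why the old cmdline_ipaddr_t declarations were unsafe (the type sizes below are illustrative stand-ins, not the real librte_cmdline definitions):

    #include <stdio.h>
    #include <stddef.h>

    typedef char cmdline_fixed_string_t[128];       /* stand-in string-token buffer */
    typedef struct { int family; char addr[16]; } ipaddr_t; /* stand-in IP-token value */

    struct result_wrong { ipaddr_t tos; unsigned char tos_value; };
    struct result_right { cmdline_fixed_string_t tos; unsigned char tos_value; };

    int main(void)
    {
        /* A string token may fill its whole fixed-size buffer; an
         * ipaddr-typed field is much smaller, so the parser's write
         * would spill into tos_value and the fields after it. */
        printf("string token buffer: %zu bytes, ipaddr field: %zu bytes\n",
               sizeof(cmdline_fixed_string_t), sizeof(ipaddr_t));
        printf("tos_value offset: wrong=%zu right=%zu\n",
               offsetof(struct result_wrong, tos_value),
               offsetof(struct result_right, tos_value));
        return 0;
    }
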
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index c50b62e..69fa04b 100644
@@ -1155,6 +1155,36 @@ setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
        }
 }
 
+static portid_t
+fwd_topology_tx_port_get(portid_t rxp)
+{
+       static int warning_once = 1;
+
+       RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
+
+       switch (port_topology) {
+       default:
+       case PORT_TOPOLOGY_PAIRED:
+               if ((rxp & 0x1) == 0) {
+                       if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
+                               return rxp + 1;
+                       if (warning_once) {
+                               printf("\nWarning! port-topology=paired"
+                                      " and odd forward ports number,"
+                                      " the last port will pair with"
+                                      " itself.\n\n");
+                               warning_once = 0;
+                       }
+                       return rxp;
+               }
+               return rxp - 1;
+       case PORT_TOPOLOGY_CHAINED:
+               return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
+       case PORT_TOPOLOGY_LOOP:
+               return rxp;
+       }
+}
+
 static void
 simple_fwd_config_setup(void)
 {
@@ -1217,11 +1247,6 @@ simple_fwd_config_setup(void)
  * For the RSS forwarding test all streams distributed over lcores. Each stream
  * being composed of a RX queue to poll on a RX port for input messages,
  * associated with a TX queue of a TX port where to send forwarded packets.
- * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
- * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
- * following rules:
- *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
- *    - TxQl = RxQj
  */
 static void
 rss_fwd_config_setup(void)
@@ -1253,18 +1278,7 @@ rss_fwd_config_setup(void)
                struct fwd_stream *fs;
 
                fs = fwd_streams[sm_id];
-
-               if ((rxp & 0x1) == 0)
-                       txp = (portid_t) (rxp + 1);
-               else
-                       txp = (portid_t) (rxp - 1);
-               /*
-                * if we are in loopback, simply send stuff out through the
-                * ingress port
-                */
-               if (port_topology == PORT_TOPOLOGY_LOOP)
-                       txp = rxp;
-
+               txp = fwd_topology_tx_port_get(rxp);
                fs->rx_port = fwd_ports_ids[rxp];
                fs->rx_queue = rxq;
                fs->tx_port = fwd_ports_ids[txp];
@@ -1279,11 +1293,7 @@ rss_fwd_config_setup(void)
                 * Restart from RX queue 0 on next RX port
                 */
                rxq = 0;
-               if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
-                       rxp = (portid_t)
-                               (rxp + ((nb_ports >> 1) / nb_fwd_ports));
-               else
-                       rxp = (portid_t) (rxp + 1);
+               rxp++;
        }
 }
 
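
A standalone sketch of the rx-to-tx mapping that the new fwd_topology_tx_port_get() centralizes, runnable outside testpmd (portid_t and the 3-port count are stand-ins for illustration):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint16_t portid_t;
    enum topo { PAIRED, CHAINED, LOOP };

    static portid_t tx_port(enum topo t, portid_t rxp, portid_t nb_ports)
    {
        switch (t) {
        case PAIRED:   /* even<->odd pairs; an odd last port pairs with itself */
            if ((rxp & 0x1) == 0)
                return (portid_t)((rxp + 1 < nb_ports) ? rxp + 1 : rxp);
            return (portid_t)(rxp - 1);
        case CHAINED:  /* each port forwards to the next, wrapping around */
            return (portid_t)((rxp + 1) % nb_ports);
        case LOOP:     /* send back out of the ingress port */
        default:
            return rxp;
        }
    }

    int main(void)
    {
        portid_t p;

        for (p = 0; p < 3; p++)   /* odd port count: port 2 pairs with itself */
            printf("rx %u: paired->%u chained->%u loop->%u\n", p,
                   tx_port(PAIRED, p, 3), tx_port(CHAINED, p, 3),
                   tx_port(LOOP, p, 3));
        return 0;
    }
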
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 8513a06..1ee2a91 100644
@@ -106,6 +106,7 @@ copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
                buf = ((char*) buf + copy_len);
                seg = seg->next;
                seg_buf = rte_pktmbuf_mtod(seg, char *);
+               copy_len = seg->data_len;
        }
        rte_memcpy(seg_buf, buf, (size_t) len);
 }
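
The one-line fix above matters because copy_len used to keep the first segment's length for every later segment; a standalone sketch of the corrected loop, with stand-in segment structs in place of rte_mbuf:

    #include <stdio.h>
    #include <string.h>

    struct seg { char data[8]; unsigned data_len; struct seg *next; };

    static void copy_buf_to_segs(const char *buf, unsigned len, struct seg *seg)
    {
        unsigned copy_len = seg->data_len;

        while (len > copy_len) {
            memcpy(seg->data, buf, copy_len);
            len -= copy_len;
            buf += copy_len;
            seg = seg->next;
            copy_len = seg->data_len;   /* the fix: refresh per segment */
        }
        memcpy(seg->data, buf, len);
    }

    int main(void)
    {
        struct seg s2 = { "", 8, NULL };
        struct seg s1 = { "", 4, &s2 };

        copy_buf_to_segs("0123456789AB", 12, &s1);
        printf("%.4s|%.8s\n", s1.data, s2.data);   /* prints 0123|456789AB */
        return 0;
    }
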
diff --git a/app/test/test.c b/app/test/test.c
index cd0e784..76941af 100644
@@ -165,8 +165,20 @@ unit_test_suite_runner(struct unit_test_suite *suite)
        }
 
        if (suite->setup)
-               if (suite->setup() != 0)
+               if (suite->setup() != 0) {
+                       /*
+                        * setup failed, so count all enabled tests and mark
+                        * them as failed
+                        */
+                       while (suite->unit_test_cases[total].testcase) {
+                               if (!suite->unit_test_cases[total].enabled)
+                                       skipped++;
+                               else
+                                       failed++;
+                               total++;
+                       }
                        goto suite_summary;
+               }
 
        printf(" + ------------------------------------------------------- +\n");
 
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index b544ab9..c9e0b66 100644
@@ -30,6 +30,8 @@
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <time.h>
+
 #include <rte_common.h>
 #include <rte_hexdump.h>
 #include <rte_mbuf.h>
diff --git a/app/test/test_memzone.c b/app/test/test_memzone.c
index 72cda00..53be29f 100644
@@ -33,6 +33,7 @@
 
 #include <stdio.h>
 #include <stdint.h>
+#include <string.h>
 #include <inttypes.h>
 #include <sys/queue.h>
 
@@ -76,6 +77,8 @@
  * - Check flags for specific huge page size reservation
  */
 
+#define TEST_MEMZONE_NAME(suffix) "MZ_TEST_" suffix
+
 /* Test if memory overlaps: return 1 if true, or 0 if false. */
 static int
 is_memory_overlap(phys_addr_t ptr1, size_t len1, phys_addr_t ptr2, size_t len2)
@@ -92,14 +95,14 @@ test_memzone_invalid_alignment(void)
 {
        const struct rte_memzone * mz;
 
-       mz = rte_memzone_lookup("invalid_alignment");
+       mz = rte_memzone_lookup(TEST_MEMZONE_NAME("invalid_alignment"));
        if (mz != NULL) {
                printf("Zone with invalid alignment has been reserved\n");
                return -1;
        }
 
-       mz = rte_memzone_reserve_aligned("invalid_alignment", 100,
-                       SOCKET_ID_ANY, 0, 100);
+       mz = rte_memzone_reserve_aligned(TEST_MEMZONE_NAME("invalid_alignment"),
+                                        100, SOCKET_ID_ANY, 0, 100);
        if (mz != NULL) {
                printf("Zone with invalid alignment has been reserved\n");
                return -1;
@@ -112,14 +115,16 @@ test_memzone_reserving_zone_size_bigger_than_the_maximum(void)
 {
        const struct rte_memzone * mz;
 
-       mz = rte_memzone_lookup("zone_size_bigger_than_the_maximum");
+       mz = rte_memzone_lookup(
+                       TEST_MEMZONE_NAME("zone_size_bigger_than_the_maximum"));
        if (mz != NULL) {
                printf("zone_size_bigger_than_the_maximum has been reserved\n");
                return -1;
        }
 
-       mz = rte_memzone_reserve("zone_size_bigger_than_the_maximum", (size_t)-1,
-                       SOCKET_ID_ANY, 0);
+       mz = rte_memzone_reserve(
+                       TEST_MEMZONE_NAME("zone_size_bigger_than_the_maximum"),
+                       (size_t)-1, SOCKET_ID_ANY, 0);
        if (mz != NULL) {
                printf("It is impossible to reserve such big a memzone\n");
                return -1;
@@ -166,8 +171,8 @@ test_memzone_reserve_flags(void)
         * available page size (i.e 1GB ) when 2MB pages are unavailable.
         */
        if (hugepage_2MB_avail) {
-               mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY,
-                               RTE_MEMZONE_2MB);
+               mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_2M"),
+                               size, SOCKET_ID_ANY, RTE_MEMZONE_2MB);
                if (mz == NULL) {
                        printf("MEMZONE FLAG 2MB\n");
                        return -1;
@@ -181,7 +186,8 @@ test_memzone_reserve_flags(void)
                        return -1;
                }
 
-               mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
+               mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_2M_HINT"),
+                               size, SOCKET_ID_ANY,
                                RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
                if (mz == NULL) {
                        printf("MEMZONE FLAG 2MB\n");
@@ -200,7 +206,9 @@ test_memzone_reserve_flags(void)
                 * HINT flag is indicated
                 */
                if (!hugepage_1GB_avail) {
-                       mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY,
+                       mz = rte_memzone_reserve(
+                                       TEST_MEMZONE_NAME("flag_zone_1G_HINT"),
+                                       size, SOCKET_ID_ANY,
                                        RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
                        if (mz == NULL) {
                                printf("MEMZONE FLAG 1GB & HINT\n");
@@ -215,8 +223,9 @@ test_memzone_reserve_flags(void)
                                return -1;
                        }
 
-                       mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY,
-                                       RTE_MEMZONE_1GB);
+                       mz = rte_memzone_reserve(
+                                       TEST_MEMZONE_NAME("flag_zone_1G"), size,
+                                       SOCKET_ID_ANY, RTE_MEMZONE_1GB);
                        if (mz != NULL) {
                                printf("MEMZONE FLAG 1GB\n");
                                return -1;
@@ -226,8 +235,8 @@ test_memzone_reserve_flags(void)
 
        /*As with 2MB tests above for 1GB huge page requests*/
        if (hugepage_1GB_avail) {
-               mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY,
-                               RTE_MEMZONE_1GB);
+               mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_1G"),
+                               size, SOCKET_ID_ANY, RTE_MEMZONE_1GB);
                if (mz == NULL) {
                        printf("MEMZONE FLAG 1GB\n");
                        return -1;
@@ -241,7 +250,8 @@ test_memzone_reserve_flags(void)
                        return -1;
                }
 
-               mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY,
+               mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_1G_HINT"),
+                               size, SOCKET_ID_ANY,
                                RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
                if (mz == NULL) {
                        printf("MEMZONE FLAG 1GB\n");
                                RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
                if (mz == NULL) {
                        printf("MEMZONE FLAG 1GB\n");
@@ -260,7 +270,9 @@ test_memzone_reserve_flags(void)
                 * HINT flag is indicated
                 */
                if (!hugepage_2MB_avail) {
                 * HINT flag is indicated
                 */
                if (!hugepage_2MB_avail) {
-                       mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
+                       mz = rte_memzone_reserve(
+                                       TEST_MEMZONE_NAME("flag_zone_2M_HINT"),
+                                       size, SOCKET_ID_ANY,
                                        RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
                        if (mz == NULL){
                                printf("MEMZONE FLAG 2MB & HINT\n");
                                        RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
                        if (mz == NULL){
                                printf("MEMZONE FLAG 2MB & HINT\n");
@@ -274,25 +286,33 @@ test_memzone_reserve_flags(void)
                                printf("Fail memzone free\n");
                                return -1;
                        }
                                printf("Fail memzone free\n");
                                return -1;
                        }
-                       mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY,
-                                       RTE_MEMZONE_2MB);
+                       mz = rte_memzone_reserve(
+                                       TEST_MEMZONE_NAME("flag_zone_2M"), size,
+                                       SOCKET_ID_ANY, RTE_MEMZONE_2MB);
                        if (mz != NULL) {
                                printf("MEMZONE FLAG 2MB\n");
                                return -1;
                        }
-                       if (rte_memzone_free(mz)) {
-                               printf("Fail memzone free\n");
-                               return -1;
-                       }
                }
 
                if (hugepage_2MB_avail && hugepage_1GB_avail) {
-                       mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
-                                                               RTE_MEMZONE_2MB|RTE_MEMZONE_1GB);
-                       if (mz != NULL) {
+                       mz = rte_memzone_reserve(
+                                       TEST_MEMZONE_NAME("flag_zone_2M_HINT"),
+                                       size, SOCKET_ID_ANY,
+                                       RTE_MEMZONE_2MB|RTE_MEMZONE_1GB);
+                       if (mz == NULL) {
                                printf("BOTH SIZES SET\n");
                                return -1;
                        }
+                       if (mz->hugepage_sz != RTE_PGSIZE_1G &&
+                                       mz->hugepage_sz != RTE_PGSIZE_2M) {
+                               printf("Wrong size when both sizes set\n");
+                               return -1;
+                       }
+                       if (rte_memzone_free(mz)) {
+                               printf("Fail memzone free\n");
+                               return -1;
+                       }
                }
        }
        /*
@@ -303,8 +323,8 @@ test_memzone_reserve_flags(void)
         * page size (i.e 16GB ) when 16MB pages are unavailable.
         */
        if (hugepage_16MB_avail) {
-               mz = rte_memzone_reserve("flag_zone_16M", size, SOCKET_ID_ANY,
-                               RTE_MEMZONE_16MB);
+               mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_16M"),
+                               size, SOCKET_ID_ANY, RTE_MEMZONE_16MB);
                if (mz == NULL) {
                        printf("MEMZONE FLAG 16MB\n");
                        return -1;
@@ -318,8 +338,10 @@ test_memzone_reserve_flags(void)
                        return -1;
                }
 
-               mz = rte_memzone_reserve("flag_zone_16M_HINT", size,
-               SOCKET_ID_ANY, RTE_MEMZONE_16MB|RTE_MEMZONE_SIZE_HINT_ONLY);
+               mz = rte_memzone_reserve(
+                               TEST_MEMZONE_NAME("flag_zone_16M_HINT"), size,
+                               SOCKET_ID_ANY,
+                               RTE_MEMZONE_16MB|RTE_MEMZONE_SIZE_HINT_ONLY);
                if (mz == NULL) {
                        printf("MEMZONE FLAG 2MB\n");
                        return -1;
@@ -337,9 +359,11 @@ test_memzone_reserve_flags(void)
                 * unless HINT flag is indicated
                 */
                if (!hugepage_16GB_avail) {
-                       mz = rte_memzone_reserve("flag_zone_16G_HINT", size,
-                               SOCKET_ID_ANY,
-                               RTE_MEMZONE_16GB|RTE_MEMZONE_SIZE_HINT_ONLY);
+                       mz = rte_memzone_reserve(
+                                       TEST_MEMZONE_NAME("flag_zone_16G_HINT"),
+                                       size, SOCKET_ID_ANY,
+                                       RTE_MEMZONE_16GB |
+                                       RTE_MEMZONE_SIZE_HINT_ONLY);
                        if (mz == NULL) {
                                printf("MEMZONE FLAG 16GB & HINT\n");
                                return -1;
@@ -353,8 +377,10 @@ test_memzone_reserve_flags(void)
                                return -1;
                        }
 
-                       mz = rte_memzone_reserve("flag_zone_16G", size,
-                               SOCKET_ID_ANY, RTE_MEMZONE_16GB);
+                       mz = rte_memzone_reserve(
+                                       TEST_MEMZONE_NAME("flag_zone_16G"),
+                                       size,
+                                       SOCKET_ID_ANY, RTE_MEMZONE_16GB);
                        if (mz != NULL) {
                                printf("MEMZONE FLAG 16GB\n");
                                return -1;
@@ -363,8 +389,8 @@ test_memzone_reserve_flags(void)
        }
        /*As with 16MB tests above for 16GB huge page requests*/
        if (hugepage_16GB_avail) {
-               mz = rte_memzone_reserve("flag_zone_16G", size, SOCKET_ID_ANY,
-                               RTE_MEMZONE_16GB);
+               mz = rte_memzone_reserve(TEST_MEMZONE_NAME("flag_zone_16G"),
+                               size, SOCKET_ID_ANY, RTE_MEMZONE_16GB);
                if (mz == NULL) {
                        printf("MEMZONE FLAG 16GB\n");
                        return -1;
@@ -378,8 +404,10 @@ test_memzone_reserve_flags(void)
                        return -1;
                }
 
-               mz = rte_memzone_reserve("flag_zone_16G_HINT", size,
-               SOCKET_ID_ANY, RTE_MEMZONE_16GB|RTE_MEMZONE_SIZE_HINT_ONLY);
+               mz = rte_memzone_reserve(
+                               TEST_MEMZONE_NAME("flag_zone_16G_HINT"), size,
+                               SOCKET_ID_ANY,
+                               RTE_MEMZONE_16GB|RTE_MEMZONE_SIZE_HINT_ONLY);
                if (mz == NULL) {
                        printf("MEMZONE FLAG 16GB\n");
                        return -1;
@@ -397,9 +425,11 @@ test_memzone_reserve_flags(void)
                 * unless HINT flag is indicated
                 */
                if (!hugepage_16MB_avail) {
-                       mz = rte_memzone_reserve("flag_zone_16M_HINT", size,
-                               SOCKET_ID_ANY,
-                               RTE_MEMZONE_16MB|RTE_MEMZONE_SIZE_HINT_ONLY);
+                       mz = rte_memzone_reserve(
+                                       TEST_MEMZONE_NAME("flag_zone_16M_HINT"),
+                                       size, SOCKET_ID_ANY,
+                                       RTE_MEMZONE_16MB |
+                                       RTE_MEMZONE_SIZE_HINT_ONLY);
                        if (mz == NULL) {
                                printf("MEMZONE FLAG 16MB & HINT\n");
                                return -1;
@@ -412,8 +442,9 @@ test_memzone_reserve_flags(void)
                                printf("Fail memzone free\n");
                                return -1;
                        }
-                       mz = rte_memzone_reserve("flag_zone_16M", size,
-                               SOCKET_ID_ANY, RTE_MEMZONE_16MB);
+                       mz = rte_memzone_reserve(
+                                       TEST_MEMZONE_NAME("flag_zone_16M"),
+                                       size, SOCKET_ID_ANY, RTE_MEMZONE_16MB);
                        if (mz != NULL) {
                                printf("MEMZONE FLAG 16MB\n");
                                return -1;
@@ -421,13 +452,23 @@ test_memzone_reserve_flags(void)
                }
 
                if (hugepage_16MB_avail && hugepage_16GB_avail) {
-                       mz = rte_memzone_reserve("flag_zone_16M_HINT", size,
-                               SOCKET_ID_ANY,
-                               RTE_MEMZONE_16MB|RTE_MEMZONE_16GB);
-                       if (mz != NULL) {
+                       mz = rte_memzone_reserve(
+                                       TEST_MEMZONE_NAME("flag_zone_16M_HINT"),
+                                       size, SOCKET_ID_ANY,
+                                       RTE_MEMZONE_16MB|RTE_MEMZONE_16GB);
+                       if (mz == NULL) {
                                printf("BOTH SIZES SET\n");
                                return -1;
                        }
+                       if (mz->hugepage_sz != RTE_PGSIZE_16G &&
+                                       mz->hugepage_sz != RTE_PGSIZE_16M) {
+                               printf("Wrong size when both sizes set\n");
+                               return -1;
+                       }
+                       if (rte_memzone_free(mz)) {
+                               printf("Fail memzone free\n");
+                               return -1;
+                       }
                }
        }
        return 0;
@@ -470,7 +511,8 @@ test_memzone_reserve_max(void)
                return 0;
        }
 
-       mz = rte_memzone_reserve("max_zone", 0, SOCKET_ID_ANY, 0);
+       mz = rte_memzone_reserve(TEST_MEMZONE_NAME("max_zone"), 0,
+                       SOCKET_ID_ANY, 0);
        if (mz == NULL){
                printf("Failed to reserve a big chunk of memory - %s\n",
                                rte_strerror(rte_errno));
@@ -512,8 +554,8 @@ test_memzone_reserve_max_aligned(void)
                return 0;
        }
 
-       mz = rte_memzone_reserve_aligned("max_zone_aligned", 0,
-                       SOCKET_ID_ANY, 0, align);
+       mz = rte_memzone_reserve_aligned(TEST_MEMZONE_NAME("max_zone_aligned"),
+                       0, SOCKET_ID_ANY, 0, align);
        if (mz == NULL){
                printf("Failed to reserve a big chunk of memory - %s\n",
                                rte_strerror(rte_errno));
@@ -550,24 +592,29 @@ test_memzone_aligned(void)
        const struct rte_memzone *memzone_aligned_1024;
 
        /* memzone that should automatically be adjusted to align on 64 bytes */
-       memzone_aligned_32 = rte_memzone_reserve_aligned("aligned_32", 100,
-                               SOCKET_ID_ANY, 0, 32);
+       memzone_aligned_32 = rte_memzone_reserve_aligned(
+                       TEST_MEMZONE_NAME("aligned_32"), 100, SOCKET_ID_ANY, 0,
+                       32);
 
        /* memzone that is supposed to be aligned on a 128 byte boundary */
-       memzone_aligned_128 = rte_memzone_reserve_aligned("aligned_128", 100,
-                               SOCKET_ID_ANY, 0, 128);
+       memzone_aligned_128 = rte_memzone_reserve_aligned(
+                       TEST_MEMZONE_NAME("aligned_128"), 100, SOCKET_ID_ANY, 0,
+                       128);
 
        /* memzone that is supposed to be aligned on a 256 byte boundary */
-       memzone_aligned_256 = rte_memzone_reserve_aligned("aligned_256", 100,
-                               SOCKET_ID_ANY, 0, 256);
+       memzone_aligned_256 = rte_memzone_reserve_aligned(
+                       TEST_MEMZONE_NAME("aligned_256"), 100, SOCKET_ID_ANY, 0,
+                       256);
 
        /* memzone that is supposed to be aligned on a 512 byte boundary */
-       memzone_aligned_512 = rte_memzone_reserve_aligned("aligned_512", 100,
-                               SOCKET_ID_ANY, 0, 512);
+       memzone_aligned_512 = rte_memzone_reserve_aligned(
+                       TEST_MEMZONE_NAME("aligned_512"), 100, SOCKET_ID_ANY, 0,
+                       512);
 
        /* memzone that is supposed to be aligned on a 1024 byte boundary */
-       memzone_aligned_1024 = rte_memzone_reserve_aligned("aligned_1024", 100,
-                               SOCKET_ID_ANY, 0, 1024);
+       memzone_aligned_1024 = rte_memzone_reserve_aligned(
+                       TEST_MEMZONE_NAME("aligned_1024"), 100, SOCKET_ID_ANY,
+                       0, 1024);
 
        printf("check alignments and lengths\n");
        if (memzone_aligned_32 == NULL) {
@@ -736,37 +783,46 @@ static int
 test_memzone_bounded(void)
 {
        const struct rte_memzone *memzone_err;
-       const char *name;
        int rc;
 
        /* should fail as boundary is not power of two */
-       name = "bounded_error_31";
-       if ((memzone_err = rte_memzone_reserve_bounded(name,
-                       100, SOCKET_ID_ANY, 0, 32, UINT32_MAX)) != NULL) {
+       memzone_err = rte_memzone_reserve_bounded(
+                       TEST_MEMZONE_NAME("bounded_error_31"), 100,
+                       SOCKET_ID_ANY, 0, 32, UINT32_MAX);
+       if (memzone_err != NULL) {
                printf("%s(%s)created a memzone with invalid boundary "
                        "conditions\n", __func__, memzone_err->name);
                return -1;
        }
 
        /* should fail as len is greater then boundary */
-       name = "bounded_error_32";
-       if ((memzone_err = rte_memzone_reserve_bounded(name,
-                       100, SOCKET_ID_ANY, 0, 32, 32)) != NULL) {
+       memzone_err = rte_memzone_reserve_bounded(
+                       TEST_MEMZONE_NAME("bounded_error_32"), 100,
+                       SOCKET_ID_ANY, 0, 32, 32);
+       if (memzone_err != NULL) {
                printf("%s(%s)created a memzone with invalid boundary "
                        "conditions\n", __func__, memzone_err->name);
                return -1;
        }
 
-       if ((rc = check_memzone_bounded("bounded_128", 100, 128, 128)) != 0)
+       rc = check_memzone_bounded(TEST_MEMZONE_NAME("bounded_128"), 100, 128,
+                       128);
+       if (rc != 0)
                return rc;
 
-       if ((rc = check_memzone_bounded("bounded_256", 100, 256, 128)) != 0)
+       rc = check_memzone_bounded(TEST_MEMZONE_NAME("bounded_256"), 100, 256,
+                       128);
+       if (rc != 0)
                return rc;
 
-       if ((rc = check_memzone_bounded("bounded_1K", 100, 64, 1024)) != 0)
+       rc = check_memzone_bounded(TEST_MEMZONE_NAME("bounded_1K"), 100, 64,
+                       1024);
+       if (rc != 0)
                return rc;
 
-       if ((rc = check_memzone_bounded("bounded_1K_MAX", 0, 64, 1024)) != 0)
+       rc = check_memzone_bounded(TEST_MEMZONE_NAME("bounded_1K_MAX"), 0, 64,
+                       1024);
+       if (rc != 0)
                return rc;
 
        return 0;
@@ -775,29 +831,32 @@ test_memzone_bounded(void)
 static int
 test_memzone_free(void)
 {
-       const struct rte_memzone *mz[RTE_MAX_MEMZONE];
+       const struct rte_memzone *mz[RTE_MAX_MEMZONE + 1];
        int i;
        char name[20];
 
-       mz[0] = rte_memzone_reserve("tempzone0", 2000, SOCKET_ID_ANY, 0);
-       mz[1] = rte_memzone_reserve("tempzone1", 4000, SOCKET_ID_ANY, 0);
+       mz[0] = rte_memzone_reserve(TEST_MEMZONE_NAME("tempzone0"), 2000,
+                       SOCKET_ID_ANY, 0);
+       mz[1] = rte_memzone_reserve(TEST_MEMZONE_NAME("tempzone1"), 4000,
+                       SOCKET_ID_ANY, 0);
 
        if (mz[0] > mz[1])
                return -1;
-       if (!rte_memzone_lookup("tempzone0"))
+       if (!rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone0")))
                return -1;
-       if (!rte_memzone_lookup("tempzone1"))
+       if (!rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone1")))
                return -1;
 
        if (rte_memzone_free(mz[0])) {
                printf("Fail memzone free - tempzone0\n");
                return -1;
        }
-       if (rte_memzone_lookup("tempzone0")) {
+       if (rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone0"))) {
                printf("Found previously free memzone - tempzone0\n");
                return -1;
        }
-       mz[2] = rte_memzone_reserve("tempzone2", 2000, SOCKET_ID_ANY, 0);
+       mz[2] = rte_memzone_reserve(TEST_MEMZONE_NAME("tempzone2"), 2000,
+                       SOCKET_ID_ANY, 0);
 
        if (mz[2] > mz[1]) {
                printf("tempzone2 should have gotten the free entry from tempzone0\n");
@@ -807,7 +866,7 @@ test_memzone_free(void)
                printf("Fail memzone free - tempzone2\n");
                return -1;
        }
-       if (rte_memzone_lookup("tempzone2")) {
+       if (rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone2"))) {
                printf("Found previously free memzone - tempzone2\n");
                return -1;
        }
@@ -815,14 +874,15 @@ test_memzone_free(void)
                printf("Fail memzone free - tempzone1\n");
                return -1;
        }
-       if (rte_memzone_lookup("tempzone1")) {
+       if (rte_memzone_lookup(TEST_MEMZONE_NAME("tempzone1"))) {
                printf("Found previously free memzone - tempzone1\n");
                return -1;
        }
 
        i = 0;
        do {
-               snprintf(name, sizeof(name), "tempzone%u", i);
+               snprintf(name, sizeof(name), TEST_MEMZONE_NAME("tempzone%u"),
+                               i);
                mz[i] = rte_memzone_reserve(name, 1, SOCKET_ID_ANY, 0);
        } while (mz[i++] != NULL);
 
@@ -830,7 +890,8 @@ test_memzone_free(void)
                printf("Fail memzone free - tempzone0\n");
                return -1;
        }
-       mz[0] = rte_memzone_reserve("tempzone0new", 0, SOCKET_ID_ANY, 0);
+       mz[0] = rte_memzone_reserve(TEST_MEMZONE_NAME("tempzone0new"), 0,
+                       SOCKET_ID_ANY, 0);
 
        if (mz[0] == NULL) {
                printf("Fail to create memzone - tempzone0new - when MAX memzones were "
@@ -857,16 +918,16 @@ test_memzone_basic(void)
        const struct rte_memzone *memzone4;
        const struct rte_memzone *mz;
 
-       memzone1 = rte_memzone_reserve("testzone1", 100,
+       memzone1 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone1"), 100,
                                SOCKET_ID_ANY, 0);
 
-       memzone2 = rte_memzone_reserve("testzone2", 1000,
+       memzone2 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone2"), 1000,
                                0, 0);
 
-       memzone3 = rte_memzone_reserve("testzone3", 1000,
+       memzone3 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone3"), 1000,
                                1, 0);
 
-       memzone4 = rte_memzone_reserve("testzone4", 1024,
+       memzone4 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone4"), 1024,
                                SOCKET_ID_ANY, 0);
 
        /* memzone3 may be NULL if we don't have NUMA */
@@ -918,12 +979,12 @@ test_memzone_basic(void)
                return -1;
 
        printf("test zone lookup\n");
-       mz = rte_memzone_lookup("testzone1");
+       mz = rte_memzone_lookup(TEST_MEMZONE_NAME("testzone1"));
        if (mz != memzone1)
                return -1;
 
        printf("test duplcate zone name\n");
-       mz = rte_memzone_reserve("testzone1", 100,
+       mz = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone1"), 100,
                        SOCKET_ID_ANY, 0);
        if (mz != NULL)
                return -1;
@@ -948,16 +1009,22 @@ test_memzone_basic(void)
        return 0;
 }
 
-static int memzone_calk_called;
-static void memzone_walk_clb(const struct rte_memzone *mz __rte_unused,
+static int test_memzones_left;
+static int memzone_walk_cnt;
+static void memzone_walk_clb(const struct rte_memzone *mz,
                             void *arg __rte_unused)
 {
-       memzone_calk_called = 1;
+       memzone_walk_cnt++;
+       if (!strncmp(TEST_MEMZONE_NAME(""), mz->name, RTE_MEMZONE_NAMESIZE))
+               test_memzones_left++;
 }
 
 static int
 test_memzone(void)
 {
+       /* take note of how many memzones were allocated before running */
+       int memzone_cnt = rte_eal_get_configuration()->mem_config->memzone_cnt;
+
        printf("test basic memzone API\n");
        if (test_memzone_basic() < 0)
                return -1;
@@ -995,8 +1062,10 @@ test_memzone(void)
                return -1;
 
        printf("check memzone cleanup\n");
+       memzone_walk_cnt = 0;
+       test_memzones_left = 0;
        rte_memzone_walk(memzone_walk_clb, NULL);
-       if (memzone_calk_called) {
+       if (memzone_walk_cnt != memzone_cnt || test_memzones_left > 0) {
                printf("there are some memzones left after test\n");
                rte_memzone_dump(stdout);
                return -1;
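
TEST_MEMZONE_NAME() works by C string-literal concatenation, which is what lets the walk callback spot leftover test zones by their common prefix; a standalone sketch of the pattern (the prefix check here uses strlen rather than the exact upstream comparison):

    #include <stdio.h>
    #include <string.h>

    #define TEST_MEMZONE_NAME(suffix) "MZ_TEST_" suffix

    static int has_test_prefix(const char *name)
    {
        /* TEST_MEMZONE_NAME("") expands to just the "MZ_TEST_" prefix */
        const char *prefix = TEST_MEMZONE_NAME("");

        return strncmp(prefix, name, strlen(prefix)) == 0;
    }

    int main(void)
    {
        printf("%s -> %d\n", TEST_MEMZONE_NAME("tempzone0"),
               has_test_prefix(TEST_MEMZONE_NAME("tempzone0")));  /* 1 */
        printf("some_other_zone -> %d\n",
               has_test_prefix("some_other_zone"));               /* 0 */
        return 0;
    }
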
diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c
index afab180..e0db34d 100644
@@ -321,10 +321,10 @@ alloc_lcore(uint16_t socketid)
        return (uint16_t)-1;
 }
 
-volatile uint64_t stop;
-uint64_t count;
-uint64_t drop;
-uint64_t idle;
+static volatile uint64_t stop;
+static uint64_t count;
+static uint64_t drop;
+static uint64_t idle;
 
 static void
 reset_count(void)
@@ -557,7 +557,7 @@ main_loop(__rte_unused void *args)
        return 0;
 }
 
-rte_atomic64_t start;
+static rte_atomic64_t start;
 
 static inline int
 poll_burst(void *args)
diff --git a/app/test/test_reorder.c b/app/test/test_reorder.c
index e8a0a2f..26dab0c 100644
@@ -362,9 +362,20 @@ test_setup(void)
        return 0;
 }
 
+static void
+test_teardown(void)
+{
+       rte_reorder_free(test_params->b);
+       test_params->b = NULL;
+       rte_mempool_free(test_params->p);
+       test_params->p = NULL;
+}
+
+
 static struct unit_test_suite reorder_test_suite  = {
 
        .setup = test_setup,
+       .teardown = test_teardown,
        .suite_name = "Reorder Unit Test Suite",
        .unit_test_cases = {
                TEST_CASE(test_reorder_create),
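
The new teardown hook mirrors setup: whatever setup allocated, teardown releases, whether the cases passed or failed. A generic sketch of that contract in plain C (not the DPDK test framework itself):

    #include <stdio.h>
    #include <stdlib.h>

    struct suite_ctx { void *pool; };

    static int setup(struct suite_ctx *c)
    {
        c->pool = malloc(64);          /* stand-in for mempool + reorder buffer */
        return c->pool != NULL ? 0 : -1;
    }

    static void teardown(struct suite_ctx *c)
    {
        free(c->pool);                 /* stand-in for rte_reorder_free() etc. */
        c->pool = NULL;
    }

    static int run_suite(struct suite_ctx *c, int (*test)(struct suite_ctx *))
    {
        int ret = -1;

        if (setup(c) == 0) {
            ret = test(c);
            teardown(c);               /* runs on success and failure alike */
        }
        return ret;
    }

    static int dummy_case(struct suite_ctx *c) { return c->pool ? 0 : -1; }

    int main(void)
    {
        struct suite_ctx ctx = { NULL };

        printf("suite result: %d\n", run_suite(&ctx, dummy_case));
        return 0;
    }
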
diff --git a/app/test/test_ring_perf.c b/app/test/test_ring_perf.c
index 320c20c..b4e4dae 100644
@@ -60,9 +60,6 @@
  */
 static const volatile unsigned bulk_sizes[] = { 8, 32 };
 
-/* The ring structure used for tests */
-static struct rte_ring *r;
-
 struct lcore_pair {
        unsigned c1, c2;
 };
@@ -143,7 +140,7 @@ get_two_sockets(struct lcore_pair *lcp)
 
 /* Get cycle counts for dequeuing from an empty ring. Should be 2 or 3 cycles */
 static void
-test_empty_dequeue(void)
+test_empty_dequeue(struct rte_ring *r)
 {
        const unsigned iter_shift = 26;
        const unsigned iterations = 1<<iter_shift;
@@ -171,6 +168,7 @@ test_empty_dequeue(void)
  * and return two. Input = burst size, output = cycle average for sp/sc & mp/mc
  */
 struct thread_params {
+       struct rte_ring *r;
        unsigned size;        /* input value, the burst size */
        double spsc, mpmc;    /* output value, the single or multi timings */
 };
@@ -185,6 +183,7 @@ enqueue_bulk(void *p)
        const unsigned iter_shift = 23;
        const unsigned iterations = 1<<iter_shift;
        struct thread_params *params = p;
+       struct rte_ring *r = params->r;
        const unsigned size = params->size;
        unsigned i;
        void *burst[MAX_BURST] = {0};
@@ -220,6 +219,7 @@ dequeue_bulk(void *p)
        const unsigned iter_shift = 23;
        const unsigned iterations = 1<<iter_shift;
        struct thread_params *params = p;
+       struct rte_ring *r = params->r;
        const unsigned size = params->size;
        unsigned i;
        void *burst[MAX_BURST] = {0};
@@ -250,7 +250,7 @@ dequeue_bulk(void *p)
  * used to measure ring perf between hyperthreads, cores and sockets.
  */
 static void
-run_on_core_pair(struct lcore_pair *cores,
+run_on_core_pair(struct lcore_pair *cores, struct rte_ring *r,
                lcore_function_t f1, lcore_function_t f2)
 {
        struct thread_params param1 = {0}, param2 = {0};
@@ -258,6 +258,7 @@ run_on_core_pair(struct lcore_pair *cores,
        for (i = 0; i < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); i++) {
                lcore_count = 0;
                param1.size = param2.size = bulk_sizes[i];
+               param1.r = param2.r = r;
                if (cores->c1 == rte_get_master_lcore()) {
                        rte_eal_remote_launch(f2, &param2, cores->c2);
                        f1(&param1);
@@ -280,7 +281,7 @@ run_on_core_pair(struct lcore_pair *cores,
  * takes on a single lcore. Result is for comparison with the bulk enq+deq.
  */
 static void
-test_single_enqueue_dequeue(void)
+test_single_enqueue_dequeue(struct rte_ring *r)
 {
        const unsigned iter_shift = 24;
        const unsigned iterations = 1<<iter_shift;
@@ -313,7 +314,7 @@ test_single_enqueue_dequeue(void)
  * as for the bulk function called on a single lcore.
  */
 static void
-test_burst_enqueue_dequeue(void)
+test_burst_enqueue_dequeue(struct rte_ring *r)
 {
        const unsigned iter_shift = 23;
        const unsigned iterations = 1<<iter_shift;
@@ -347,7 +348,7 @@ test_burst_enqueue_dequeue(void)
 
 /* Times enqueue and dequeue on a single lcore */
 static void
-test_bulk_enqueue_dequeue(void)
+test_bulk_enqueue_dequeue(struct rte_ring *r)
 {
        const unsigned iter_shift = 23;
        const unsigned iterations = 1<<iter_shift;
@@ -385,32 +386,35 @@ static int
 test_ring_perf(void)
 {
        struct lcore_pair cores;
+       struct rte_ring *r = NULL;
+
        r = rte_ring_create(RING_NAME, RING_SIZE, rte_socket_id(), 0);
-       if (r == NULL && (r = rte_ring_lookup(RING_NAME)) == NULL)
+       if (r == NULL)
                return -1;
 
        printf("### Testing single element and burst enq/deq ###\n");
-       test_single_enqueue_dequeue();
-       test_burst_enqueue_dequeue();
+       test_single_enqueue_dequeue(r);
+       test_burst_enqueue_dequeue(r);
 
        printf("\n### Testing empty dequeue ###\n");
-       test_empty_dequeue();
+       test_empty_dequeue(r);
 
        printf("\n### Testing using a single lcore ###\n");
-       test_bulk_enqueue_dequeue();
+       test_bulk_enqueue_dequeue(r);
 
        if (get_two_hyperthreads(&cores) == 0) {
                printf("\n### Testing using two hyperthreads ###\n");
-               run_on_core_pair(&cores, enqueue_bulk, dequeue_bulk);
+               run_on_core_pair(&cores, r, enqueue_bulk, dequeue_bulk);
        }
        if (get_two_cores(&cores) == 0) {
                printf("\n### Testing using two physical cores ###\n");
-               run_on_core_pair(&cores, enqueue_bulk, dequeue_bulk);
+               run_on_core_pair(&cores, r, enqueue_bulk, dequeue_bulk);
        }
        if (get_two_sockets(&cores) == 0) {
                printf("\n### Testing using two NUMA nodes ###\n");
-               run_on_core_pair(&cores, enqueue_bulk, dequeue_bulk);
+               run_on_core_pair(&cores, r, enqueue_bulk, dequeue_bulk);
        }
+       rte_ring_free(r);
        return 0;
 }
 
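For reference, a minimal sketch of the lifecycle this fix enforces (names and sizes are illustrative, not the test's own): the ring is created once, handed to each measurement through its new parameter, and freed on exit instead of being leaked and later recovered via rte_ring_lookup().

    #include <rte_ring.h>
    #include <rte_lcore.h>

    static int run_perf_sketch(void)
    {
            struct rte_ring *r = rte_ring_create("PERF_RING", 4096,
                                                 rte_socket_id(), 0);
            if (r == NULL)
                    return -1;
            /* ... timing loops receive r as an argument ... */
            rte_ring_free(r);       /* no leak on any exit path */
            return 0;
    }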
diff --git a/app/test/test_table.c b/app/test/test_table.c
index 1faa0a6..31eac11 100644
@@ -82,6 +82,14 @@ uint64_t pipeline_test_hash(void *key,
        return signature;
 }
 
+static void
+app_free_resources(void) {
+       int i;
+       for (i = 0; i < N_PORTS; i++)
+               rte_ring_free(rings_rx[i]);
+       rte_mempool_free(pool);
+}
+
 static void
 app_init_mbuf_pools(void)
 {
@@ -141,18 +149,20 @@ app_init_rings(void)
 static int
 test_table(void)
 {
-       int status, failures;
+       int status, ret;
        unsigned i;
 
-       failures = 0;
+       ret = TEST_SUCCESS;
 
        app_init_rings();
        app_init_mbuf_pools();
 
        printf("\n\n\n\n************Pipeline tests************\n");
 
-       if (test_table_pipeline() < 0)
-               return -1;
+       if (test_table_pipeline() < 0) {
+               ret = TEST_FAILED;
+               goto end;
+       }
 
        printf("\n\n\n\n************Port tests************\n");
        for (i = 0; i < n_port_tests; i++) {
@@ -160,8 +170,8 @@ test_table(void)
                if (status < 0) {
                        printf("\nPort test number %d failed (%d).\n", i,
                                status);
-                       failures++;
-                       return -1;
+                       ret = TEST_FAILED;
+                       goto end;
                }
        }
 
@@ -171,8 +181,8 @@ test_table(void)
                if (status < 0) {
                        printf("\nTable test number %d failed (%d).\n", i,
                                status);
-                       failures++;
-                       return -1;
+                       ret = TEST_FAILED;
+                       goto end;
                }
        }
 
@@ -182,21 +192,23 @@ test_table(void)
                if (status < 0) {
                        printf("\nCombined table test number %d failed with "
                                "reason number %d.\n", i, status);
-                       failures++;
-                       return -1;
+                       ret = TEST_FAILED;
+                       goto end;
                }
        }
 
-       if (failures)
-               return -1;
-
 #ifdef RTE_LIBRTE_ACL
        printf("\n\n\n\n************ACL tests************\n");
-       if (test_table_acl() < 0)
-               return -1;
+       if (test_table_acl() < 0) {
+               ret = TEST_FAILED;
+               goto end;
+       }
 #endif
 
-       return 0;
+end:
+       app_free_resources();
+
+       return ret;
 }
 
 REGISTER_TEST_COMMAND(table_autotest, test_table);
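The shape of this change, as a self-contained sketch of the single-exit cleanup idiom it adopts (setup(), exercise() and free_resources() are stand-ins for the app_init_*() helpers and the pipeline/port tests; TEST_SUCCESS and TEST_FAILED come from the patch itself):

    #include <stdio.h>

    #define TEST_SUCCESS 0
    #define TEST_FAILED -1

    static int setup(void)    { return 0; }  /* stand-in for app_init_*() */
    static int exercise(void) { return -1; } /* stand-in for the tests    */
    static void free_resources(void) { puts("cleanup runs on every path"); }

    static int test_something(void)
    {
            int ret = TEST_SUCCESS;

            if (setup() < 0) {
                    ret = TEST_FAILED;
                    goto end;
            }
            if (exercise() < 0) {
                    ret = TEST_FAILED;
                    goto end;
            }
    end:
            free_resources();
            return ret;
    }

    int main(void) { return test_something() == TEST_SUCCESS ? 0 : 1; }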
diff --git a/app/test/test_table_acl.c b/app/test/test_table_acl.c
index b3bfda4..a5d44c5 100644
@@ -532,6 +532,8 @@ setup_acl_pipeline(void)
                struct rte_pipeline_table_entry *table_entries[5];
                int key_found[5];
 
+               memset(table_entries, 0, sizeof(table_entries));
+
                for (n = 0; n < 5; n++) {
                        memset(&keys[n], 0, sizeof(struct rte_table_acl_rule_delete_params));
                        key_array[n] = &keys[n];
diff --git a/app/test/test_timer_perf.c b/app/test/test_timer_perf.c
index fa77efb..f6bbdf3 100644
@@ -155,6 +155,7 @@ test_timer_perf(void)
        printf("Time per rte_timer_manage with zero callbacks: %"PRIu64" cycles\n",
                        (end_tsc - start_tsc + iterations/2) / iterations);
 
+       rte_free(tms);
        return 0;
 }
 
diff --git a/buildtools/pmdinfogen/pmdinfogen.c b/buildtools/pmdinfogen/pmdinfogen.c
index df10a2f..79bb343 100644
@@ -158,7 +158,8 @@ static int parse_elf(struct elf_info *info, const char *filename)
                 * There are more than 64k sections,
                 * read count from .sh_size.
                 */
                 * There are more than 64k sections,
                 * read count from .sh_size.
                 */
-               info->num_sections = TO_NATIVE(endian, 32, sechdrs[0].sh_size);
+               info->num_sections =
+                       TO_NATIVE(endian, ADDR_SIZE, sechdrs[0].sh_size);
        } else {
                info->num_sections = hdr->e_shnum;
        }
@@ -181,7 +182,7 @@ static int parse_elf(struct elf_info *info, const char *filename)
                sechdrs[i].sh_offset    =
                        TO_NATIVE(endian, ADDR_SIZE, sechdrs[i].sh_offset);
                sechdrs[i].sh_size      =
-                       TO_NATIVE(endian, 32, sechdrs[i].sh_size);
+                       TO_NATIVE(endian, ADDR_SIZE, sechdrs[i].sh_size);
                sechdrs[i].sh_link      =
                        TO_NATIVE(endian, 32, sechdrs[i].sh_link);
                sechdrs[i].sh_info      =
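Why ADDR_SIZE matters here, sketched with a hypothetical helper (this is not pmdinfogen's actual TO_NATIVE macro): on a 64-bit target, sh_size is an Elf64_Xword, so byte-swapping it as a 32-bit field corrupts the value on every cross-endian build.

    #include <stdint.h>
    #include <byteswap.h>   /* bswap_64: glibc-specific, assumed here */

    /* Hypothetical illustration: convert a 64-bit ELF field from file
     * endianness to host endianness. Swapping only 32 bits of it (the
     * old behaviour) mangles the value whenever host and target differ. */
    static uint64_t elf64_to_native(int file_big_endian, uint64_t v)
    {
            const uint16_t probe = 1;
            int host_big_endian = (*(const uint8_t *)&probe == 0);

            return (host_big_endian == file_big_endian) ? v : bswap_64(v);
    }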
diff --git a/config/common_base b/config/common_base
index 4bff83a..2d4a47f 100644
@@ -583,6 +583,11 @@ CONFIG_RTE_INSECURE_FUNCTION_WARNING=n
 CONFIG_RTE_APP_TEST=y
 CONFIG_RTE_APP_TEST_RESOURCE_TAR=n
 
+#
+# Compile the procinfo application
+#
+CONFIG_RTE_PROC_INFO=n
+
 #
 # Compile the PMD test application
 #
diff --git a/config/common_linuxapp b/config/common_linuxapp
index 2483dfa..4339493 100644
@@ -44,3 +44,4 @@ CONFIG_RTE_LIBRTE_PMD_VHOST=y
 CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
 CONFIG_RTE_LIBRTE_POWER=y
 CONFIG_RTE_VIRTIO_USER=y
+CONFIG_RTE_PROC_INFO=y
diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index 10e5473..4d44d52 100644
@@ -34,7 +34,7 @@ AESN-NI Multi Buffer Crytpo Poll Mode Driver
 The AESNI MB PMD (**librte_pmd_aesni_mb**) provides poll mode crypto driver
 support for utilizing Intel multi buffer library, see the white paper
 `Fast Multi-buffer IPsec Implementations on Intel® Architecture Processors
-<https://www-ssl.intel.com/content/www/us/en/intelligent-systems/intel-technology/fast-multi-buffer-ipsec-implementations-ia-processors-paper.html?wapkw=multi+buffer>`_.
+<https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-multi-buffer-ipsec-implementations-ia-processors-paper.pdf>`_.
 
 The AES-NI MB PMD has current only been tested on Fedora 21 64-bit with gcc.
 
diff --git a/doc/guides/nics/features/i40e.ini b/doc/guides/nics/features/i40e.ini
index 0d143bc..36ac337 100644
@@ -46,3 +46,4 @@ Linux VFIO           = Y
 x86-32               = Y
 x86-64               = Y
 ARMv8                = Y
+Power8               = Y
diff --git a/doc/guides/nics/features/i40e_vec.ini b/doc/guides/nics/features/i40e_vec.ini
index edd6b71..5ec4088 100644
@@ -38,3 +38,4 @@ Linux VFIO           = Y
 x86-32               = Y
 x86-64               = Y
 ARMv8                = Y
+Power8               = Y
diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
index 5780268..556b55f 100644
@@ -130,6 +130,21 @@ Please note that enabling debugging options may affect system performance.
   Interrupt Throttling interval.
 
 
+Runtime Config Options
+~~~~~~~~~~~~~~~~~~~~~~
+
+- ``Support multiple driver`` (default ``disable``)
+
+  There was a multiple-driver support issue when a 700 series Ethernet
+  adapter was used by both the Linux kernel driver and the DPDK PMD. To fix
+  this issue, the ``devargs`` parameter ``support-multi-driver`` is
+  introduced, for example::
+
+    -w 84:00.0,support-multi-driver=1
+
+  With the above configuration, the DPDK PMD will not change global
+  registers, and will switch the PF interrupt from IntN to Int0 to avoid
+  interrupt conflicts between DPDK and the Linux kernel.
+
 Driver Compilation
 ~~~~~~~~~~~~~~~~~~
 
@@ -459,3 +474,15 @@ Receive packets with Ethertype 0x88A8
 
 Due to the FW limitation, PF can receive packets with Ethertype 0x88A8
 only when floating VEB is disabled.
+
+Global configuration warning
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The i40e PMD writes some global registers to enable certain functions or to
+apply certain configurations. Consequently, when different ports of the same
+NIC are used by the Linux kernel and by DPDK, the port bound to the kernel
+driver can be affected by the port bound to DPDK. For example, the register
+I40E_GL_SWT_L2TAGCTRL controls the L2 tag, and the i40e PMD uses it to set
+the VLAN TPID. If the TPID is set on port A through DPDK, the configuration
+also affects port B on the same NIC under the kernel driver, even though
+port B never asked for that TPID.
+The PMD therefore reports a warning to clarify what is changed whenever it
+writes a global register.
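As a concrete illustration of the cross-port effect (a hedged sketch using the standard ethdev VLAN API; it is not part of the patch):

    #include <rte_ethdev.h>

    /* Setting the outer TPID on one i40e port goes through the global
     * register I40E_GL_SWT_L2TAGCTRL, so every port on the same adapter
     * observes 0x88A8, including ports bound to the kernel driver. */
    static void set_outer_tpid(uint8_t port_a)
    {
            rte_eth_dev_set_vlan_ether_type(port_a, ETH_VLAN_TYPE_OUTER,
                                            0x88A8);
    }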
diff --git a/doc/guides/rel_notes/release_16_11.rst b/doc/guides/rel_notes/release_16_11.rst
index ec6c661..3289c3c 100644
@@ -1028,3 +1028,135 @@ Fixes in 16.11 LTS Release
 * uio: fix compilation with -Og
 * usertools: fix device binding with python 3
 * vfio: fix close unchecked file descriptor
+
+16.11.5
+~~~~~~~
+
+* app/procinfo: add compilation option in config
+* app/testpmd: fix crash of txonly with multiple segments
+* app/testpmd: fix flow director filter
+* app/testpmd: fix port index in RSS forward config
+* app/testpmd: fix port topology in RSS forward config
+* bus/pci: fix interrupt handler type
+* contigmem: fix build on FreeBSD 12
+* crypto/qat: fix allocation check and leak
+* crypto/qat: fix null auth algo overwrite
+* doc: fix outdated link to IPsec white paper
+* eal/ppc: remove the braces in memory barrier macros
+* eal/ppc: support sPAPR IOMMU for vfio-pci
+* eal: update assertion macro
+* eal/x86: use lock-prefixed instructions for SMP barrier
+* ethdev: fix data alignment
+* ethdev: fix link autonegotiation value
+* ethdev: fix missing imissed counter in xstats
+* ethdev: fix typo in functions comment
+* examples/bond: check mbuf allocation
+* examples/exception_path: align stats on cache line
+* examples/ip_pipeline: fix timer period unit
+* examples/ipsec-secgw: fix corner case for SPI value
+* examples/l3fwd-power: fix frequency detection
+* examples/l3fwd-power: fix Rx without interrupt
+* examples/vhost: fix sending ARP packet to self
+* examples/vhost: fix startup check
+* igb_uio: fix IRQ disable on recent kernels
+* igb_uio: fix MSI-X IRQ assignment with new IRQ function
+* igb_uio: switch to new irq function for MSI-X
+* keepalive: fix state alignment
+* kni: fix build with kernel 4.15
+* lpm: fix ARM big endian build
+* malloc: fix end for bounded elements
+* malloc: protect stats with lock
+* mbuf: cleanup function to get last segment
+* mbuf: fix NULL freeing when debug enabled
+* mem: fix mmap error check on huge page attach
+* memzone: fix leak on allocation error
+* mk: fix external build
+* mk: support renamed Makefile in external project
+* net/bnxt: fix broadcast configuration
+* net/bnxt: fix group info usage
+* net/bnxt: fix headroom initialization
+* net/bnxt: fix link speed setting with autoneg off
+* net/bnxt: fix Rx checksum flags
+* net/bnxt: fix size of Tx ring in HW
+* net/bnxt: parse checksum offload flags
+* net/bnxt: support new PCI IDs
+* net/bonding: check error of MAC address setting
+* net/bonding: fix activated slave in 8023ad mode
+* net/bonding: fix setting slave MAC addresses
+* net/e1000: fix mailbox interrupt handler
+* net/e1000: fix VF Rx interrupt enabling
+* net/ena: do not set Tx L4 offloads in Rx path
+* net/enic: fix crash due to static max number of queues
+* net/fm10k: fix logical port delete
+* net/i40e: add debug logs when writing global registers
+* net/i40e: add warnings when writing global registers
+* net/i40e/base: fix compile issue for GCC 6.3
+* net/i40e/base: fix link LED blink
+* net/i40e/base: fix NVM lock
+* net/i40e: check multi-driver option parsing
+* net/i40e: fix ARM big endian build
+* net/i40e: fix flag for MAC address write
+* net/i40e: fix flow director Rx resource defect
+* net/i40e: fix interrupt conflict when using multi-driver
+* net/i40e: fix multiple driver support issue
+* net/i40e: fix Rx interrupt
+* net/i40e: fix VF reset stats crash
+* net/i40e: fix VF Rx interrupt enabling
+* net/i40e: fix VLAN offload setting
+* net/i40e: fix VSI MAC filter on primary address change
+* net/i40e: implement vector PMD for altivec
+* net/igb: fix Tx queue number assignment
+* net/ixgbe/base: add media type of fixed fiber
+* net/ixgbe: fix ARM big endian build
+* net/ixgbe: fix mailbox interrupt handler
+* net/ixgbe: fix max queue number for VF
+* net/ixgbe: fix reset error handling
+* net/ixgbe: fix the failure of number of Tx queue check
+* net/ixgbe: fix VF Rx interrupt enabling
+* net/ixgbe: improve link state check on VF
+* net/mlx5: fix deadlock of link status alarm
+* net/mlx5: fix missing RSS capability
+* net/mlx5: fix MTU update
+* net/nfp: fix CRC strip check behaviour
+* net/nfp: fix jumbo settings
+* net/nfp: fix MTU settings
+* net/pcap: fix the NUMA id display in logs
+* net/qede/base: fix VF LRO tunnel configuration
+* net/qede: fix clearing of queue stats
+* net/qede: fix few log messages
+* net/qede: fix MTU set and max Rx pkt len usage
+* net/qede: fix to reject config with no Rx queue
+* net/szedata2: fix check of mmap return value
+* net/thunderx: fix multi segment Tx function return
+* net/vhost: fix log messages on create/destroy
+* net/virtio: fix incorrect cast
+* net/virtio: fix mbuf data offset for simple Rx
+* net/virtio: fix memory leak when reinitializing device
+* net/virtio: fix queue flushing with vector Rx enabled
+* net/virtio: fix resuming port with Rx vector path
+* net/virtio: fix Rx and Tx handler selection for ARM32
+* net/virtio: fix typo in function name
+* net/virtio: fix vector Rx flushing
+* net/virtio-user: fix start with kernel vhost
+* pdump: fix error check when creating/canceling thread
+* pmdinfogen: fix cross compilation for ARM big endian
+* test/crypto: fix missing include
+* test/memzone: fix freeing test
+* test/memzone: fix NULL freeing
+* test/memzone: fix wrong test
+* test/memzone: handle previously allocated memzones
+* test/pmd_perf: declare variables as static
+* test: register test as failed if setup failed
+* test/reorder: fix memory leak
+* test/ring_perf: fix memory leak
+* test/table: fix memory leak
+* test/table: fix uninitialized parameter
+* test/timer_perf: fix memory leak
+* usertools/devbind: remove unused function
+* vfio: fix enabled check on error
+* vhost: do not take lock on owner reset
+* vhost: fix crash
+* vhost: fix dequeue zero copy with virtio1
+* vhost: fix error code check when creating thread
+* vhost: fix mbuf free
+* vhost: protect active rings from async ring changes
diff --git a/doc/guides/sample_app_ug/keep_alive.rst b/doc/guides/sample_app_ug/keep_alive.rst
index 3897377..4876927 100644
@@ -186,5 +186,5 @@ The rte_keepalive_mark_alive function simply sets the core state to alive.
     static inline void
     rte_keepalive_mark_alive(struct rte_keepalive *keepcfg)
     {
-        keepcfg->state_flags[rte_lcore_id()] = ALIVE;
+        keepcfg->live_data[rte_lcore_id()].core_state = RTE_KA_STATE_ALIVE;
     }
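For orientation, a hedged sketch of how a worker lcore would use this call (running and do_work() are hypothetical; keepcfg would come from rte_keepalive_create() during application setup):

    #include <rte_keepalive.h>

    extern volatile int running;
    static void do_work(void) { /* hypothetical packet processing */ }

    static int worker(void *arg)
    {
            struct rte_keepalive *keepcfg = arg;

            while (running) {
                    rte_keepalive_mark_alive(keepcfg); /* ping the monitor */
                    do_work();
            }
            return 0;
    }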
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 1c4b184..ac9d822 100644
@@ -334,6 +334,11 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
 
                in = rte_zmalloc("working mem for key",
                                ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
+               if (in == NULL) {
+                       PMD_DRV_LOG(ERR, "Failed to alloc memory");
+                       return -ENOMEM;
+               }
+
                rte_memcpy(in, qat_aes_xcbc_key_seed,
                                ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
                for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
@@ -364,6 +369,11 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
                                ICP_QAT_HW_GALOIS_E_CTR0_SZ);
                in = rte_zmalloc("working mem for key",
                                ICP_QAT_HW_GALOIS_H_SZ, 16);
+               if (in == NULL) {
+                       PMD_DRV_LOG(ERR, "Failed to alloc memory");
+                       return -ENOMEM;
+               }
+
                memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
                if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
                        &enc_key) != 0) {
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 8b830b8..a941679 100644
@@ -1085,8 +1085,9 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
                        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
                                        qat_req->comn_hdr.serv_specif_flags,
                                        ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
-                       auth_param->auth_res_addr =
-                                       op->sym->auth.digest.phys_addr;
+                       if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
+                               auth_param->auth_res_addr =
+                                               op->sym->auth.digest.phys_addr;
                        digest_appended = 0;
                }
 
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index 45c6519..6d73f12 100644
@@ -121,7 +121,7 @@ static struct rte_eth_link pmd_link = {
        .link_speed = ETH_SPEED_NUM_10G,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = ETH_LINK_DOWN,
-       .link_autoneg = ETH_LINK_SPEED_AUTONEG
+       .link_autoneg = ETH_LINK_AUTONEG
 };
 
 static uint16_t
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index ff3c240..b934605 100644
@@ -110,6 +110,7 @@ struct bnxt_link_info {
        uint16_t                link_speed;
        uint16_t                support_speeds;
        uint16_t                auto_link_speed;
+       uint16_t                force_link_speed;
        uint16_t                auto_link_speed_mask;
        uint32_t                preemphasis;
        uint8_t                 phy_type;
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index b6feb5a..359a95d 100644
@@ -63,19 +63,34 @@ static const char bnxt_version[] =
 #define BROADCOM_DEV_ID_57302 0x16c9
 #define BROADCOM_DEV_ID_57304_PF 0x16ca
 #define BROADCOM_DEV_ID_57304_VF 0x16cb
+#define BROADCOM_DEV_ID_57417_MF 0x16cc
 #define BROADCOM_DEV_ID_NS2 0x16cd
+#define BROADCOM_DEV_ID_57311 0x16ce
+#define BROADCOM_DEV_ID_57312 0x16cf
 #define BROADCOM_DEV_ID_57402 0x16d0
 #define BROADCOM_DEV_ID_57404 0x16d1
 #define BROADCOM_DEV_ID_57406_PF 0x16d2
 #define BROADCOM_DEV_ID_57406_VF 0x16d3
 #define BROADCOM_DEV_ID_57402_MF 0x16d4
 #define BROADCOM_DEV_ID_57407_RJ45 0x16d5
+#define BROADCOM_DEV_ID_57412 0x16d6
+#define BROADCOM_DEV_ID_57414 0x16d7
+#define BROADCOM_DEV_ID_57416_RJ45 0x16d8
+#define BROADCOM_DEV_ID_57417_RJ45 0x16d9
 #define BROADCOM_DEV_ID_5741X_VF 0x16dc
+#define BROADCOM_DEV_ID_57412_MF 0x16de
+#define BROADCOM_DEV_ID_57314 0x16df
+#define BROADCOM_DEV_ID_57317_RJ45 0x16e0
 #define BROADCOM_DEV_ID_5731X_VF 0x16e1
+#define BROADCOM_DEV_ID_57417_SFP 0x16e2
+#define BROADCOM_DEV_ID_57416_SFP 0x16e3
+#define BROADCOM_DEV_ID_57317_SFP 0x16e4
 #define BROADCOM_DEV_ID_57404_MF 0x16e7
 #define BROADCOM_DEV_ID_57406_MF 0x16e8
 #define BROADCOM_DEV_ID_57407_SFP 0x16e9
 #define BROADCOM_DEV_ID_57407_MF 0x16ea
+#define BROADCOM_DEV_ID_57414_MF 0x16ec
+#define BROADCOM_DEV_ID_57416_MF 0x16ee
 
 static struct rte_pci_id bnxt_pci_id_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
@@ -95,6 +110,21 @@ static struct rte_pci_id bnxt_pci_id_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
        { .vendor_id = 0, /* sentinel */ },
 };
 
@@ -285,7 +315,9 @@ static int bnxt_init_nic(struct bnxt *bp)
 {
        int rc;
 
-       bnxt_init_ring_grps(bp);
+       rc = bnxt_init_ring_grps(bp);
+       if (rc)
+               return rc;
        bnxt_init_vnics(bp);
        bnxt_init_filters(bp);
 
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 619bc97..8ff4c15 100644
@@ -174,9 +174,9 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
         * by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
-               mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
+               mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
-               mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
+               mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        req.mask = rte_cpu_to_le_32(mask);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -504,7 +504,8 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise speeds specified. */
-               if (conf->auto_link_speed_mask) {
+               if (conf->auto_link_speed_mask &&
+                   !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
@@ -566,6 +567,7 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
+       link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;
@@ -604,7 +606,7 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
-                        uint32_t stats_ctx_id)
+                        uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
 {
        int rc = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
@@ -625,11 +627,12 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
-               req.cmpl_ring_id =
-                   rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
+               req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.length = rte_cpu_to_le_32(ring->ring_size);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
-               req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
+               if (stats_ctx_id != INVALID_STATS_CTX_ID)
+                       req.enables =
+                       rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
@@ -796,7 +799,9 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
        HWRM_CHECK_RESULT;
 
        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
-       bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
+       //Tx rings don't need grp_info entry. It is a Rx only attribute.
+       if (idx)
+               bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
 
        return rc;
 }
@@ -818,7 +823,9 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
        HWRM_CHECK_RESULT;
 
        cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
-       bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
+       //Tx rings don't have a grp_info entry. It is a Rx only attribute.
+       if (idx)
+               bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
 
        return rc;
 }
@@ -1025,10 +1032,13 @@ int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;
 
-               if (i >= bp->rx_cp_nr_rings)
+               if (i >= bp->rx_cp_nr_rings) {
                        cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
-               else
+                       //Tx rings don't have a grp_info entry.
+                       idx = 0;
+               } else {
                        cpr = bp->rx_queues[i]->cp_ring;
+               }
                if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
                        rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
                        if (rc)
@@ -1052,6 +1062,8 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
+                       //Tx rings don't need grp_info entry.
+                       idx = 0;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
@@ -1089,14 +1101,13 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
 }
 
 static void bnxt_free_cp_ring(struct bnxt *bp,
-                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
+                             struct bnxt_cp_ring_info *cpr)
 {
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 
        bnxt_hwrm_ring_free(bp, cp_ring,
                        HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
        cp_ring->fw_ring_id = INVALID_HW_RING_ID;
-       bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                        sizeof(*cpr->cp_desc_ring));
        cpr->cp_raw_cons = 0;
@@ -1112,7 +1123,6 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
                struct bnxt_tx_ring_info *txr = txq->tx_ring;
                struct bnxt_ring *ring = txr->tx_ring_struct;
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;
-               unsigned int idx = bp->rx_cp_nr_rings + i + 1;
 
                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
@@ -1128,7 +1138,7 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
                        txr->tx_cons = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
-                       bnxt_free_cp_ring(bp, cpr, idx);
+                       bnxt_free_cp_ring(bp, cpr);
        }
 
        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
@@ -1152,7 +1162,8 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
                        rxr->rx_prod = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
-                       bnxt_free_cp_ring(bp, cpr, idx);
+                       bnxt_free_cp_ring(bp, cpr);
+               bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
        }
 
        /* Default completion ring */
@@ -1160,7 +1171,8 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
                struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
 
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
-                       bnxt_free_cp_ring(bp, cpr, 0);
+                       bnxt_free_cp_ring(bp, cpr);
+               bp->grp_info[0].cp_fw_ring_id = INVALID_HW_RING_ID;
        }
 
        return rc;
@@ -1511,7 +1523,9 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
        autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
        speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
        link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
-       if (autoneg == 1) {
+       /* Autoneg can be done only when the FW allows */
+       if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
+                               bp->link_info.force_link_speed)) {
                link_req.phy_flags |=
                                HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
                link_req.auto_link_speed_mask =
@@ -1529,7 +1543,13 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
                }
 
                link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
-               link_req.link_speed = speed;
+               /* If user wants a particular speed try that first. */
+               if (speed)
+                       link_req.link_speed = speed;
+               else if (bp->link_info.force_link_speed)
+                       link_req.link_speed = bp->link_info.force_link_speed;
+               else
+                       link_req.link_speed = bp->link_info.auto_link_speed;
        }
        link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
        link_req.auto_pause = bp->link_info.auto_pause;
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 6519ef2..32c74c8 100644
@@ -65,7 +65,7 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
-                        uint32_t stats_ctx_id);
+                        uint32_t stats_ctx_id, uint32_t cmpl_ring_id);
 int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type);
 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx);
@@ -101,5 +101,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp);
 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link);
 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up);
 int bnxt_hwrm_func_qcfg(struct bnxt *bp);
+#define HWRM_RING_ALLOC_INPUT_EN_STAT_CTX_ID_VALID \
+       HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID
 
 #endif
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 3f81ffc..2bceb4d 100644
@@ -61,13 +61,19 @@ void bnxt_free_ring(struct bnxt_ring *ring)
  * Ring groups
  */
 
-void bnxt_init_ring_grps(struct bnxt *bp)
+int bnxt_init_ring_grps(struct bnxt *bp)
 {
        unsigned int i;
 
+       //One slot is still consumed by Default ring.
+       if (bp->max_ring_grps < 1 + bp->rx_cp_nr_rings)
+               return -ENOMEM;
+
        for (i = 0; i < bp->max_ring_grps; i++)
                memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
                       sizeof(struct bnxt_ring_grp_info));
+
+       return 0;
 }
 
 /*
@@ -219,7 +225,8 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
 
                rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
                                          HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
-                                         0, HWRM_NA_SIGNATURE);
+                                         0, HWRM_NA_SIGNATURE,
+                                         HWRM_NA_SIGNATURE);
                if (rc)
                        goto err_out;
                cpr->cp_doorbell =
@@ -239,7 +246,8 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
                /* Rx cmpl */
                rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
                                        HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
-                                       idx, HWRM_NA_SIGNATURE);
+                                       idx, HWRM_NA_SIGNATURE,
+                                       HWRM_NA_SIGNATURE);
                if (rc)
                        goto err_out;
                cpr->cp_doorbell =
@@ -251,7 +259,8 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
                /* Rx ring */
                rc = bnxt_hwrm_ring_alloc(bp, ring,
                                        HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
-                                       idx, cpr->hw_stats_ctx_id);
+                                       idx, cpr->hw_stats_ctx_id,
+                                       cp_ring->fw_ring_id);
                if (rc)
                        goto err_out;
                rxr->rx_prod = 0;
@@ -279,20 +288,21 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
                /* Tx cmpl */
                rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
                                        HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL,
-                                       idx, HWRM_NA_SIGNATURE);
+                                       idx, HWRM_NA_SIGNATURE,
+                                       HWRM_NA_SIGNATURE);
                if (rc)
                        goto err_out;
 
                cpr->cp_doorbell =
                    (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
                    idx * 0x80;
-               bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
                B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
 
                /* Tx ring */
                rc = bnxt_hwrm_ring_alloc(bp, ring,
                                        HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
-                                       idx, cpr->hw_stats_ctx_id);
+                                       idx, cpr->hw_stats_ctx_id,
+                                       cp_ring->fw_ring_id);
                if (rc)
                        goto err_out;
 
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
index 8656549..22a56eb 100644
@@ -65,6 +65,7 @@
 #define MAX_CP_DESC_CNT (16 * 1024)
 
 #define INVALID_HW_RING_ID      ((uint16_t)-1)
+#define INVALID_STATS_CTX_ID   ((uint16_t)-1)
 
 struct bnxt_ring {
        void                    *bd;
@@ -92,7 +93,7 @@ struct bnxt_tx_ring_info;
 struct bnxt_rx_ring_info;
 struct bnxt_cp_ring_info;
 void bnxt_free_ring(struct bnxt_ring *ring);
-void bnxt_init_ring_grps(struct bnxt *bp);
+int bnxt_init_ring_grps(struct bnxt *bp);
 int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
                            struct bnxt_tx_ring_info *tx_ring_info,
                            struct bnxt_rx_ring_info *rx_ring_info,
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 980f3ec..5698f02 100644
@@ -72,7 +72,7 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
 
        rx_buf->mbuf = data;
 
-       rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf));
+       rxbd->addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(data));
 
        return 0;
 }
@@ -126,6 +126,7 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
        mbuf = rx_buf->mbuf;
        rte_prefetch0(mbuf);
 
+       mbuf->data_off = RTE_PKTMBUF_HEADROOM;
        mbuf->nb_segs = 1;
        mbuf->next = NULL;
        mbuf->pkt_len = rxcmp->len;
@@ -152,12 +153,12 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
        if (likely(RX_CMP_IP_CS_OK(rxcmp1)))
                mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
        else
-               mbuf->ol_flags |= PKT_RX_IP_CKSUM_NONE;
+               mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
 
        if (likely(RX_CMP_L4_CS_OK(rxcmp1)))
                mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
        else
-               mbuf->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+               mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
 
        if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
                /* Re-install the mbuf back to the rx ring */
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index c2f9ae7..9cd44a9 100644
@@ -101,7 +101,7 @@ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
        if (ring == NULL)
                return -ENOMEM;
        txr->tx_ring_struct = ring;
-       ring->ring_size = rte_align32pow2(txq->nb_tx_desc + 1);
+       ring->ring_size = rte_align32pow2(txq->nb_tx_desc);
        ring->ring_mask = ring->ring_size - 1;
        ring->bd = (void *)txr->tx_desc_ring;
        ring->bd_dma = txr->tx_desc_mapping;
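The arithmetic behind this one-character fix, with illustrative values: descriptor counts are normally powers of two, so the extra +1 silently doubled the ring size reported to the hardware.

    #include <stdio.h>
    #include <rte_common.h>

    int main(void)
    {
            /* Illustrative value: a 256-descriptor Tx queue. */
            printf("%u\n", rte_align32pow2(256 + 1)); /* 512: HW ring doubled */
            printf("%u\n", rte_align32pow2(256));     /* 256: matches reality */
            return 0;
    }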
@@ -216,23 +216,28 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
                                        tx_pkt->l4_len;
                        txbd1->mss = tx_pkt->tso_segsz;
 
-               } else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
+               } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) ==
+                          PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
                        /* Outer IP, Inner IP, Inner TCP/UDP CSO */
                        txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
                        txbd1->mss = 0;
-               } else if (tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) {
+               } else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
+                          PKT_TX_IIP_TCP_UDP_CKSUM) {
                        /* (Inner) IP, (Inner) TCP/UDP CSO */
                        txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
                        txbd1->mss = 0;
-               } else if (tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) {
+               } else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
+                          PKT_TX_OIP_TCP_UDP_CKSUM) {
                        /* Outer IP, (Inner) TCP/UDP CSO */
                        txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
                        txbd1->mss = 0;
-               } else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) {
+               } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
+                          PKT_TX_OIP_IIP_CKSUM) {
                        /* Outer IP, Inner IP CSO */
                        txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM;
                        txbd1->mss = 0;
-               } else if (tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) {
+               } else if ((tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) ==
+                          PKT_TX_TCP_UDP_CKSUM) {
                        /* TCP/UDP CSO */
                        txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
                        txbd1->mss = 0;
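Why the `==` comparison matters here, as a self-contained demonstration (flag values are invented for illustration; the real PKT_TX_* combinations are multi-bit masks in the same way):

    #include <stdio.h>

    #define F_IP    0x1
    #define F_L4    0x2
    #define F_OUTER 0x4
    #define MASK_OIP_IIP_L4 (F_OUTER | F_IP | F_L4)

    int main(void)
    {
            unsigned flags = F_L4; /* only plain L4 checksum requested */

            /* A bare AND fires on any overlapping bit, so this packet
             * would wrongly take the "outer + inner" offload branch. */
            printf("any-bit match: %d\n",
                   (flags & MASK_OIP_IIP_L4) != 0);               /* 1 */
            /* Requiring all bits selects the branch correctly. */
            printf("all-bit match: %d\n",
                   (flags & MASK_OIP_IIP_L4) == MASK_OIP_IIP_L4); /* 0 */
            return 0;
    }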
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 8081981..eea6ccc 100644
@@ -1109,7 +1109,8 @@ bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev)
        uint8_t i;
 
        for (i = 0; i < internals->active_slave_count; i++)
-               bond_mode_8023ad_activate_slave(bond_dev, i);
+               bond_mode_8023ad_activate_slave(bond_dev,
+                               internals->active_slaves[i]);
 
        return 0;
 }
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 2a3893a..4b6f147 100644
@@ -412,8 +412,13 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
        if (internals->slave_count < 1) {
                /* if MAC is not user defined then use MAC of first slave add to
                 * bonded device */
-               if (!internals->user_defined_mac)
-                       mac_address_set(bonded_eth_dev, slave_eth_dev->data->mac_addrs);
+               if (!internals->user_defined_mac) {
+                       if (mac_address_set(bonded_eth_dev,
+                                           slave_eth_dev->data->mac_addrs)) {
+                               RTE_BOND_LOG(ERR, "Failed to set MAC address");
+                               return -1;
+                       }
+               }
 
                /* Inherit eth dev link properties from first slave */
                link_properties_set(bonded_eth_dev,
@@ -565,7 +570,7 @@ __eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
                        &rte_eth_devices[bonded_port_id].data->port_id);
 
        /* Restore original MAC address of slave device */
-       mac_address_set(&rte_eth_devices[slave_port_id],
+       rte_eth_dev_default_mac_addr_set(slave_port_id,
                        &(internals->slaves[slave_idx].persisted_mac_addr));
 
        slave_eth_dev = &rte_eth_devices[slave_port_id];
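A hedged usage sketch of the API the bonding driver now relies on (port_id and the error handling are illustrative): calling rte_eth_dev_default_mac_addr_set() programs the slave's own PMD, instead of only updating bonding's local copy as the internal mac_address_set() did.

    #include <rte_ethdev.h>
    #include <rte_log.h>

    static int restore_mac(uint8_t port_id)
    {
            struct ether_addr saved;

            rte_eth_macaddr_get(port_id, &saved);
            /* ... port reconfigured elsewhere ... */
            if (rte_eth_dev_default_mac_addr_set(port_id, &saved) != 0) {
                    RTE_LOG(ERR, PMD, "MAC restore failed on port %u\n",
                            (unsigned)port_id);
                    return -1;
            }
            return 0;
    }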
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 7811a5a..c1ec3aa 100644
@@ -1217,7 +1217,8 @@ mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
        case BONDING_MODE_BALANCE:
        case BONDING_MODE_BROADCAST:
                for (i = 0; i < internals->slave_count; i++) {
-                       if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
+                       if (rte_eth_dev_default_mac_addr_set(
+                                       internals->slaves[i].port_id,
                                        bonded_eth_dev->data->mac_addrs)) {
                                RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
                                                internals->slaves[i].port_id);
@@ -1235,15 +1236,16 @@ mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
                for (i = 0; i < internals->slave_count; i++) {
                        if (internals->slaves[i].port_id ==
                                        internals->current_primary_port) {
-                               if (mac_address_set(&rte_eth_devices[internals->primary_port],
+                               if (rte_eth_dev_default_mac_addr_set(
+                                               internals->primary_port,
                                                bonded_eth_dev->data->mac_addrs)) {
                                        RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
                                                        internals->current_primary_port);
                                        return -1;
                                }
                        } else {
-                               if (mac_address_set(
-                                               &rte_eth_devices[internals->slaves[i].port_id],
+                               if (rte_eth_dev_default_mac_addr_set(
+                                               internals->slaves[i].port_id,
                                                &internals->slaves[i].persisted_mac_addr)) {
                                        RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
                                                        internals->slaves[i].port_id);
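The two bonding hunks above replace the driver-internal mac_address_set() helper with the public rte_eth_dev_default_mac_addr_set() API and start acting on its return value. A minimal caller-side sketch of the new pattern follows; the helper name is hypothetical and only the 16.11-era ethdev API visible in the hunks is assumed:

#include <rte_ethdev.h>
#include <rte_log.h>

/* Hypothetical helper (illustration, not patch code): restore a saved MAC
 * through the public API, as the slave-remove path now does. Unlike the
 * old internal helper, rte_eth_dev_default_mac_addr_set() validates the
 * port id, calls the PMD's mac_addr_set op and refreshes the default
 * entry in dev->data->mac_addrs, keeping ethdev state consistent. */
static int
restore_persisted_mac(uint8_t slave_port_id, struct ether_addr *mac)
{
	if (rte_eth_dev_default_mac_addr_set(slave_port_id, mac) != 0) {
		RTE_LOG(ERR, PMD, "Failed to set MAC on port %u\n",
			slave_port_id);
		return -1;
	}
	return 0;
}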
index aee3d34..58495a5 100644 (file)
@@ -1145,7 +1145,7 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
                link.link_speed = 0;
                link.link_duplex = ETH_LINK_HALF_DUPLEX;
                link.link_status = ETH_LINK_DOWN;
-               link.link_autoneg = ETH_LINK_SPEED_FIXED;
+               link.link_autoneg = ETH_LINK_FIXED;
        }
        rte_em_dev_atomic_write_link_status(dev, &link);
 
index 407021d..5108ff3 100644 (file)
@@ -1121,7 +1121,7 @@ igb_check_mq_mode(struct rte_eth_dev *dev)
        enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
        enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;
-       uint16_t nb_tx_q = dev->data->nb_rx_queues;
+       uint16_t nb_tx_q = dev->data->nb_tx_queues;
 
        if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
            tx_mq_mode == ETH_MQ_TX_DCB ||
@@ -2226,7 +2226,7 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
                link.link_speed = 0;
                link.link_duplex = ETH_LINK_HALF_DUPLEX;
                link.link_status = ETH_LINK_DOWN;
-               link.link_autoneg = ETH_LINK_SPEED_FIXED;
+               link.link_autoneg = ETH_LINK_FIXED;
        }
        rte_igb_dev_atomic_write_link_status(dev, &link);
 
@@ -2757,12 +2757,17 @@ void igbvf_mbx_process(struct rte_eth_dev *dev)
        struct e1000_mbx_info *mbx = &hw->mbx;
        u32 in_msg = 0;
 
-       if (mbx->ops.read(hw, &in_msg, 1, 0))
-               return;
+       /* peek the message first */
+       in_msg = E1000_READ_REG(hw, E1000_VMBMEM(0));
 
        /* PF reset VF event */
-       if (in_msg == E1000_PF_CONTROL_MSG)
-               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
+       if (in_msg == E1000_PF_CONTROL_MSG) {
+               /* dummy mbx read to ack pf */
+               if (mbx->ops.read(hw, &in_msg, 1, 0))
+                       return;
+               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+                                             NULL);
+       }
 }
 
 static int
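A note on the igbvf mailbox rework above: reading E1000_VMBMEM(0) directly only peeks at the pending message, whereas mbx->ops.read() both consumes the message and acknowledges it to the PF. By peeking first and issuing the (per the in-line comment, dummy) read only when the message is E1000_PF_CONTROL_MSG, the handler no longer consumes and acks mailbox messages that were not addressed to it, while the RTE_ETH_EVENT_INTR_RESET callback still fires once per PF reset notification.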
@@ -3085,7 +3090,8 @@ igbvf_dev_start(struct rte_eth_dev *dev)
        }
 
        /* check and configure queue intr-vector mapping */
-       if (dev->data->dev_conf.intr_conf.rxq != 0) {
+       if (rte_intr_cap_multiple(intr_handle) &&
+           dev->data->dev_conf.intr_conf.rxq) {
                intr_vector = dev->data->nb_rx_queues;
                ret = rte_intr_efd_enable(intr_handle, intr_vector);
                if (ret)
index 6efe0c3..63c4236 100644 (file)
@@ -248,16 +248,17 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
                                       struct ena_com_rx_ctx *ena_rx_ctx)
 {
        uint64_t ol_flags = 0;
+       uint32_t packet_type = 0;
 
        if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
-               ol_flags |= PKT_TX_TCP_CKSUM;
+               packet_type |= RTE_PTYPE_L4_TCP;
        else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
-               ol_flags |= PKT_TX_UDP_CKSUM;
+               packet_type |= RTE_PTYPE_L4_UDP;
 
        if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)
-               ol_flags |= PKT_TX_IPV4;
+               packet_type |= RTE_PTYPE_L3_IPV4;
        else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)
-               ol_flags |= PKT_TX_IPV6;
+               packet_type |= RTE_PTYPE_L3_IPV6;
 
        if (unlikely(ena_rx_ctx->l4_csum_err))
                ol_flags |= PKT_RX_L4_CKSUM_BAD;
@@ -265,6 +266,7 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
                ol_flags |= PKT_RX_IP_CKSUM_BAD;
 
        mbuf->ol_flags = ol_flags;
+       mbuf->packet_type = packet_type;
 }
 
 static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
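The ena fix above stops writing PKT_TX_* values into the Rx mbuf's ol_flags, which corrupted the offload flags, and instead reports the parsed protocols through mbuf->packet_type using RTE_PTYPE_* values, keeping ol_flags for genuine Rx checksum errors. Pieced together from the two hunks, the corrected helper reads roughly as below; the l3_csum_err condition lies outside the hunk context and is assumed from the surrounding driver code (types and flags as declared in rte_mbuf.h and the ena_com headers):

static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_rx_ctx *ena_rx_ctx)
{
	uint64_t ol_flags = 0;
	uint32_t packet_type = 0;

	/* L4 and L3 protocol go into packet_type, not ol_flags */
	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
		packet_type |= RTE_PTYPE_L4_TCP;
	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
		packet_type |= RTE_PTYPE_L4_UDP;

	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)
		packet_type |= RTE_PTYPE_L3_IPV4;
	else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)
		packet_type |= RTE_PTYPE_L3_IPV6;

	/* ol_flags now carries only Rx checksum status */
	if (unlikely(ena_rx_ctx->l4_csum_err))
		ol_flags |= PKT_RX_L4_CKSUM_BAD;
	if (unlikely(ena_rx_ctx->l3_csum_err))
		ol_flags |= PKT_RX_IP_CKSUM_BAD;

	mbuf->ol_flags = ol_flags;
	mbuf->packet_type = packet_type;
}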
index a3d2a0f..46f20b2 100644 (file)
 #define DRV_DESCRIPTION                "Cisco VIC Ethernet NIC Poll-mode Driver"
 #define DRV_COPYRIGHT          "Copyright 2008-2015 Cisco Systems, Inc"
 
-#define ENIC_WQ_MAX            8
-/* With Rx scatter support, we use two RQs on VIC per RQ used by app. Both
- * RQs use the same CQ.
- */
-#define ENIC_RQ_MAX            16
-#define ENIC_CQ_MAX            (ENIC_WQ_MAX + (ENIC_RQ_MAX / 2))
-#define ENIC_INTR_MAX          (ENIC_CQ_MAX + 2)
-
 #define VLAN_ETH_HLEN           18
 
 #define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
@@ -139,17 +131,17 @@ struct enic {
        unsigned int flags;
        unsigned int priv_flags;
 
-       /* work queue */
-       struct vnic_wq wq[ENIC_WQ_MAX];
-       unsigned int wq_count;
+       /* work queue (len = conf_wq_count) */
+       struct vnic_wq *wq;
+       unsigned int wq_count; /* equals eth_dev nb_tx_queues */

-       /* receive queue */
-       struct vnic_rq rq[ENIC_RQ_MAX];
-       unsigned int rq_count;
+       /* receive queue (len = conf_rq_count) */
+       struct vnic_rq *rq;
+       unsigned int rq_count; /* equals eth_dev nb_rx_queues */

-       /* completion queue */
-       struct vnic_cq cq[ENIC_CQ_MAX];
-       unsigned int cq_count;
+       /* completion queue (len = conf_cq_count) */
+       struct vnic_cq *cq;
+       unsigned int cq_count; /* equals rq_count + wq_count */
 
        /* interrupt resource */
        struct vnic_intr intr;
index 2b154ec..17479d4 100644 (file)
@@ -184,13 +184,7 @@ static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
        struct enic *enic = pmd_priv(eth_dev);
 
        ENICPMD_FUNC_TRACE();
-       if (queue_idx >= ENIC_WQ_MAX) {
-               dev_err(enic,
-                       "Max number of TX queues exceeded.  Max is %d\n",
-                       ENIC_WQ_MAX);
-               return -EINVAL;
-       }
-
+       RTE_ASSERT(queue_idx < enic->conf_wq_count);
        eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
 
        ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
@@ -302,16 +296,8 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
        struct enic *enic = pmd_priv(eth_dev);
 
        ENICPMD_FUNC_TRACE();
-       /* With Rx scatter support, two RQs are now used on VIC per RQ used
-        * by the application.
-        */
-       if (queue_idx * 2 >= ENIC_RQ_MAX) {
-               dev_err(enic,
-                       "Max number of RX queues exceeded.  Max is %d. This PMD uses 2 RQs on VIC per RQ used by DPDK.\n",
-                       ENIC_RQ_MAX);
-               return -EINVAL;
-       }
 
 
+       RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
        eth_dev->data->rx_queues[queue_idx] =
                (void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
 
index b25eff4..63d0c50 100644 (file)
@@ -1080,6 +1080,9 @@ static void enic_dev_deinit(struct enic *enic)
        vnic_dev_notify_unset(enic->vdev);
 
        rte_free(eth_dev->data->mac_addrs);
+       rte_free(enic->cq);
+       rte_free(enic->rq);
+       rte_free(enic->wq);
 }
 
 
@@ -1087,27 +1090,28 @@ int enic_set_vnic_res(struct enic *enic)
 {
        struct rte_eth_dev *eth_dev = enic->rte_dev;
        int rc = 0;
+       unsigned int required_rq, required_wq, required_cq;

-       /* With Rx scatter support, two RQs are now used per RQ used by
-        * the application.
-        */
-       if (enic->conf_rq_count < eth_dev->data->nb_rx_queues) {
+       /* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
+       required_rq = eth_dev->data->nb_rx_queues * 2;
+       required_wq = eth_dev->data->nb_tx_queues;
+       required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
+
+       if (enic->conf_rq_count < required_rq) {
                dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
                        eth_dev->data->nb_rx_queues,
-                       eth_dev->data->nb_rx_queues * 2, enic->conf_rq_count);
+                       required_rq, enic->conf_rq_count);
                rc = -EINVAL;
        }
-       if (enic->conf_wq_count < eth_dev->data->nb_tx_queues) {
+       if (enic->conf_wq_count < required_wq) {
                dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
                        eth_dev->data->nb_tx_queues, enic->conf_wq_count);
                rc = -EINVAL;
        }
 
-       if (enic->conf_cq_count < (eth_dev->data->nb_rx_queues +
-                                  eth_dev->data->nb_tx_queues)) {
+       if (enic->conf_cq_count < required_cq) {
                dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
-                       (eth_dev->data->nb_rx_queues +
-                        eth_dev->data->nb_tx_queues), enic->conf_cq_count);
+                       required_cq, enic->conf_cq_count);
                rc = -EINVAL;
        }
 
@@ -1309,6 +1313,25 @@ static int enic_dev_init(struct enic *enic)
                dev_err(enic, "See the ENIC PMD guide for more information.\n");
                return -EINVAL;
        }
+       /* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
+       enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
+                              enic->conf_cq_count, 8);
+       enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
+                              enic->conf_rq_count, 8);
+       enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
+                              enic->conf_wq_count, 8);
+       if (enic->conf_cq_count > 0 && enic->cq == NULL) {
+               dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
+               return -1;
+       }
+       if (enic->conf_rq_count > 0 && enic->rq == NULL) {
+               dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
+               return -1;
+       }
+       if (enic->conf_wq_count > 0 && enic->wq == NULL) {
+               dev_err(enic, "failed to allocate vnic_wq, aborting.\n");
+               return -1;
+       }
 
        /* Get the supported filters */
        enic_fdir_info(enic);
index d04efdc..0fac816 100644 (file)
@@ -53,7 +53,7 @@
 /* Wait interval to get switch status */
 #define WAIT_SWITCH_MSG_US    100000
 /* A period of quiescence for switch */
-#define FM10K_SWITCH_QUIESCE_US 10000
+#define FM10K_SWITCH_QUIESCE_US 100000
 /* Number of chars per uint32 type */
 #define CHARS_PER_UINT32 (sizeof(uint32_t))
 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
@@ -1239,7 +1239,7 @@ fm10k_dev_close(struct rte_eth_dev *dev)
                MAX_LPORT_NUM, false);
        fm10k_mbx_unlock(hw);
 
-       /* allow 10ms for device to quiesce */
+       /* allow 100ms for device to quiesce */
        rte_delay_us(FM10K_SWITCH_QUIESCE_US);
 
        /* Stop mailbox service first */
index 13085fb..9c9a867 100644 (file)
@@ -99,6 +99,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c
 ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_neon.c
+else ifeq ($(CONFIG_RTE_ARCH_PPC_64),y)
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_altivec.c
 else
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_sse.c
 endif
index 0d3a83f..e231582 100644 (file)
@@ -682,6 +682,12 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
                           &oem_lo);
        hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

+       /* Newer versions of firmware require lock when reading the NVM */
+       if ((hw->aq.api_maj_ver > 1) ||
+           ((hw->aq.api_maj_ver == 1) &&
+            (hw->aq.api_min_ver >= 5)))
+               hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
        if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
                ret_code = I40E_ERR_FIRMWARE_API_VERSION;
                goto init_adminq_free_arq;
@@ -1051,22 +1057,19 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
        }
 
        /* set next_to_use to head */
-#ifdef PF_DRIVER
 #ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
-               ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+               ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
+       else
+               ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
 #else
-       ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
-#endif /* INTEGRATED_VF */
+#ifdef PF_DRIVER
+       ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
 #endif /* PF_DRIVER */
 #ifdef VF_DRIVER
-#ifdef INTEGRATED_VF
-       if (i40e_is_vf(hw))
-               ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
-#else
-       ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
-#endif /* INTEGRATED_VF */
+       ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
 #endif /* VF_DRIVER */
+#endif /* INTEGRATED_VF */
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
index 9a6b3ed..de60e2a 100644 (file)
@@ -1046,7 +1046,8 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
 
 #ifdef X722_SUPPORT
        if (hw->mac.type == I40E_MAC_X722)
-               hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+               hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+                            I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
 
 #endif
        status = i40e_init_nvm(hw);
@@ -1578,6 +1579,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
                case I40E_COMBINED_ACTIVITY:
                case I40E_FILTER_ACTIVITY:
                case I40E_MAC_ACTIVITY:
+               case I40E_LINK_ACTIVITY:
                        continue;
                default:
                        break;
@@ -1626,6 +1628,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
                case I40E_COMBINED_ACTIVITY:
                case I40E_FILTER_ACTIVITY:
                case I40E_MAC_ACTIVITY:
+               case I40E_LINK_ACTIVITY:
                        continue;
                default:
                        break;
@@ -1636,9 +1639,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
                gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
                             I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
 
-               if (mode == I40E_LINK_ACTIVITY)
-                       blink = false;
-
                if (blink)
                        gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
                else
index 4fa1220..4976b1f 100644 (file)
@@ -221,7 +221,8 @@ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
 
 #ifdef X722_SUPPORT
        if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
-               ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+               if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+                       ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
                if (!ret_code) {
                        ret_code = i40e_read_nvm_word_aq(hw, offset, data);
                        i40e_release_nvm(hw);
index b5f72c3..d514abe 100644 (file)
@@ -685,6 +685,7 @@ struct i40e_hw {
 
 #endif
 #define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
        u64 flags;
 
        /* debug mask */
index 0835c2d..0b270b6 100644 (file)
@@ -716,6 +716,15 @@ rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
        return 0;
 }
 
+static inline void
+i40e_write_global_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
+{
+       i40e_write_rx_ctl(hw, reg_addr, reg_val);
+       PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified "
+                   "with value 0x%08x",
+                   reg_addr, reg_val);
+}
+
 RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
 
@@ -735,9 +744,10 @@ static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
         * configuration API is added to avoid configuration conflicts
         * between ports of the same device.
         */
-       I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
-       I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
-       I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
+       I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
+       I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
+       I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
+       i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD);
 
        /*
         * Initialize registers for parsing packet type of QinQ
@@ -745,8 +755,26 @@ static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
         * configuration API is added to avoid configuration conflicts
         * between ports of the same device.
         */
-       I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
-       I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
+       I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
+       I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
+       i40e_global_cfg_warning(I40E_WARNING_QINQ_PARSER);
+}
+
+static inline void i40e_config_automask(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+       uint32_t val;
+
+       /* INTENA flag is not auto-cleared for interrupt */
+       val = I40E_READ_REG(hw, I40E_GLINT_CTL);
+       val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
+               I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
+
+       /* If support multi-driver, PF will use INT0. */
+       if (!pf->support_multi_driver)
+               val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
+
+       I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
 }
 
 #define I40E_FLOW_CONTROL_ETHERTYPE  0x8808
 }
 
 #define I40E_FLOW_CONTROL_ETHERTYPE  0x8808
@@ -933,6 +961,71 @@ config_floating_veb(struct rte_eth_dev *dev)
 #define I40E_L2_TAGS_S_TAG_SHIFT 1
 #define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
 
 #define I40E_L2_TAGS_S_TAG_SHIFT 1
 #define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
 
+#define ETH_I40E_SUPPORT_MULTI_DRIVER  "support-multi-driver"
+RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
+                             ETH_I40E_SUPPORT_MULTI_DRIVER "=0|1");
+
+static int
+i40e_parse_multi_drv_handler(__rte_unused const char *key,
+                             const char *value,
+                             void *opaque)
+{
+       struct i40e_pf *pf;
+       unsigned long support_multi_driver;
+       char *end;
+
+       pf = (struct i40e_pf *)opaque;
+
+       errno = 0;
+       support_multi_driver = strtoul(value, &end, 10);
+       if (errno != 0 || end == value || *end != 0) {
+               PMD_DRV_LOG(WARNING, "Wrong global configuration");
+               return -(EINVAL);
+       }
+
+       if (support_multi_driver == 1 || support_multi_driver == 0)
+               pf->support_multi_driver = (bool)support_multi_driver;
+       else
+               PMD_DRV_LOG(WARNING, "%s must be 1 or 0,",
+                           "enable global configuration by default."
+                           ETH_I40E_SUPPORT_MULTI_DRIVER);
+       return 0;
+}
+
+static int
+i40e_support_multi_driver(struct rte_eth_dev *dev)
+{
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct rte_pci_device *pci_dev = dev->pci_dev;
+       static const char *valid_keys[] = {
+               ETH_I40E_SUPPORT_MULTI_DRIVER, NULL};
+       struct rte_kvargs *kvlist;
+
+       /* Enable global configuration by default */
+       pf->support_multi_driver = false;
+
+       if (!pci_dev->device.devargs)
+               return 0;
+
+       kvlist = rte_kvargs_parse(pci_dev->device.devargs->args, valid_keys);
+       if (!kvlist)
+               return -EINVAL;
+
+       if (rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER) > 1)
+               PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
+                           "the first invalid or last valid one is used !",
+                           ETH_I40E_SUPPORT_MULTI_DRIVER);
+
+       if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
+                              i40e_parse_multi_drv_handler, pf) < 0) {
+               rte_kvargs_free(kvlist);
+               return -EINVAL;
+       }
+
+       rte_kvargs_free(kvlist);
+       return 0;
+}
+
 static int
 eth_i40e_dev_init(struct rte_eth_dev *dev)
 {
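The block above wires in a per-device "support-multi-driver" option through rte_kvargs. With the 16.11 EAL such a PCI devarg would typically be supplied via the whitelist option, e.g. -w 0000:02:00.0,support-multi-driver=1 (the address is illustrative). When the flag is set, the PMD treats global registers as shared with other drivers on the same NIC: the hunks that follow skip the GLQF workaround writes and reject TPID, flow-control water-marker, GRE key length, hash global configuration and input-set changes, logging an error or returning -ENOTSUP instead of touching global state.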
@@ -982,6 +1075,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
        hw->bus.func = pci_dev->addr.function;
        hw->adapter_stopped = 0;
 
+       /* Check if need to support multi-driver */
+       i40e_support_multi_driver(dev);
+
        /* Make sure all is clean before doing PF reset */
        i40e_clear_hw(hw);
 
@@ -1002,13 +1098,16 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
                return ret;
        }
 
+       i40e_config_automask(pf);
+
        /*
         * To work around the NVM issue, initialize registers
         * for flexible payload and packet type of QinQ by
         * software. It should be removed once issues are fixed
         * in NVM.
         */
-       i40e_GLQF_reg_init(hw);
+       if (!pf->support_multi_driver)
+               i40e_GLQF_reg_init(hw);
 
        /* Initialize the input set for filters (hash and fd) to default value */
        i40e_filter_input_set_init(pf);
@@ -1104,11 +1203,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
        i40e_set_fc(hw, &aq_fail, TRUE);
 
        /* Set the global registers with default ether type value */
-       ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
-       if (ret != I40E_SUCCESS) {
-               PMD_INIT_LOG(ERR, "Failed to set the default outer "
-                            "VLAN ether type");
-               goto err_setup_pf_switch;
+       if (!pf->support_multi_driver) {
+               ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
+                                        ETHER_TYPE_VLAN);
+               if (ret != I40E_SUCCESS) {
+                       PMD_INIT_LOG(ERR, "Failed to set the default outer "
+                                    "VLAN ether type");
+                       goto err_setup_pf_switch;
+               }
        }
 
        /* PF setup, which includes VSI setup */
@@ -1384,6 +1486,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
        int i;
        uint32_t val;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+       struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
 
        /* Bind all RX queues to allocated MSIX interrupt */
        for (i = 0; i < nb_queue; i++) {
@@ -1402,7 +1505,8 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
        /* Write first RX queue to Link list register as the head element */
        if (vsi->type != I40E_VSI_SRIOV) {
                uint16_t interval =
-                       i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+                       i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL,
+                                              pf->support_multi_driver);
 
                if (msix_vect == I40E_MISC_VEC_ID) {
                        I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
@@ -1460,7 +1564,6 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
        uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
        uint16_t queue_idx = 0;
        int record = 0;
-       uint32_t val;
        int i;
 
        for (i = 0; i < vsi->nb_qps; i++) {
@@ -1468,13 +1571,6 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
                I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
        }
 
-       /* INTENA flag is not auto-cleared for interrupt */
-       val = I40E_READ_REG(hw, I40E_GLINT_CTL);
-       val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
-               I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK |
-               I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
-       I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
-
        /* VF bind interrupt */
        if (vsi->type == I40E_VSI_SRIOV) {
                __vsi_queues_bind_intr(vsi, msix_vect,
@@ -1527,27 +1623,22 @@ i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
        struct rte_eth_dev *dev = vsi->adapter->eth_dev;
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
-       uint16_t interval = i40e_calc_itr_interval(\
-               RTE_LIBRTE_I40E_ITR_INTERVAL);
+       struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
        uint16_t msix_intr, i;
 
        uint16_t msix_intr, i;
 
-       if (rte_intr_allow_others(intr_handle))
+       if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
                for (i = 0; i < vsi->nb_msix; i++) {
                        msix_intr = vsi->msix_intr + i;
                        I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
                for (i = 0; i < vsi->nb_msix; i++) {
                        msix_intr = vsi->msix_intr + i;
                        I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
-                               I40E_PFINT_DYN_CTLN_INTENA_MASK |
-                               I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
-                               (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
-                               (interval <<
-                                I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+                                      I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                                      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+                                      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
                }
        else
                I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
                               I40E_PFINT_DYN_CTL0_INTENA_MASK |
                               I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
-                              (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) |
-                              (interval <<
-                               I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT));
+                              I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
 
        I40E_WRITE_FLUSH(hw);
 }
@@ -1558,16 +1649,18 @@ i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
        struct rte_eth_dev *dev = vsi->adapter->eth_dev;
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+       struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
        uint16_t msix_intr, i;
 
-       if (rte_intr_allow_others(intr_handle))
+       if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
                for (i = 0; i < vsi->nb_msix; i++) {
                        msix_intr = vsi->msix_intr + i;
                        I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
-                                      0);
+                                      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
                }
        else
-               I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+               I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+                              I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
 
        I40E_WRITE_FLUSH(hw);
 }
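In both interrupt hunks above, the two ITR_INDX bits are now written as their full field mask, i.e. ITR index 3, which on this controller family selects "no ITR update": the vector is enabled or disabled without programming any throttling interval. That is why the local interval computation is dropped, and why writing plain 0 on disable (which would select ITR index 0) is avoided. Decomposed for illustration only, not patch text:

/* Enable/disable a vector without touching shared throttling settings. */
uint32_t enable  = I40E_PFINT_DYN_CTLN_INTENA_MASK |   /* turn vector on  */
		   I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | /* clear pending   */
		   I40E_PFINT_DYN_CTLN_ITR_INDX_MASK;  /* ITR_INDX = 3    */
uint32_t disable = I40E_PFINT_DYN_CTLN_ITR_INDX_MASK;  /* off, ITR untouched */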
@@ -2743,11 +2836,17 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
                   uint16_t tpid)
 {
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        uint64_t reg_r = 0, reg_w = 0;
        uint16_t reg_id = 0;
        int ret = 0;
        int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
 
+       if (pf->support_multi_driver) {
+               PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
+               return -ENOTSUP;
+       }
+
        switch (vlan_type) {
        case ETH_VLAN_TYPE_OUTER:
                if (qinq)
@@ -2797,8 +2896,11 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
                            "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
                return ret;
        }
-       PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
-                   "I40E_GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);
+       PMD_DRV_LOG(DEBUG,
+                   "Global register 0x%08x is changed with value 0x%08x",
+                   I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
+
+       i40e_global_cfg_warning(I40E_WARNING_TPID);
 
        return ret;
 }
@@ -3025,19 +3127,25 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
                I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
        }
 
-       /* config the water marker both based on the packets and bytes */
-       I40E_WRITE_REG(hw, I40E_GLRPB_PHW,
-                      (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
-                      << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
-       I40E_WRITE_REG(hw, I40E_GLRPB_PLW,
-                      (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
-                      << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
-       I40E_WRITE_REG(hw, I40E_GLRPB_GHW,
-                      pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
-                      << I40E_KILOSHIFT);
-       I40E_WRITE_REG(hw, I40E_GLRPB_GLW,
-                      pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
-                      << I40E_KILOSHIFT);
+       if (!pf->support_multi_driver) {
+               /* config water marker both based on the packets and bytes */
+               I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
+                               (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
+                                << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
+               I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
+                               (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
+                                << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
+               I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
+                                pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
+                                << I40E_KILOSHIFT);
+               I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
+                                 pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
+                                 << I40E_KILOSHIFT);
+               i40e_global_cfg_warning(I40E_WARNING_FLOW_CTL);
+       } else {
+               PMD_DRV_LOG(ERR,
+                           "Water marker configuration is not supported.");
+       }
 
        I40E_WRITE_FLUSH(hw);

@@ -4524,16 +4632,28 @@ i40e_vsi_setup(struct i40e_pf *pf,
 
        /* VF has MSIX interrupt in VF range, don't allocate here */
        if (type == I40E_VSI_MAIN) {
-               ret = i40e_res_pool_alloc(&pf->msix_pool,
-                                         RTE_MIN(vsi->nb_qps,
-                                                 RTE_MAX_RXTX_INTR_VEC_ID));
-               if (ret < 0) {
-                       PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d",
-                                   vsi->seid, ret);
-                       goto fail_queue_alloc;
+               if (pf->support_multi_driver) {
+                       /* If support multi-driver, need to use INT0 instead of
+                        * allocating from msix pool. The Msix pool is init from
+                        * INT1, so it's OK just set msix_intr to 0 and nb_msix
+                        * to 1 without calling i40e_res_pool_alloc.
+                        */
+                       vsi->msix_intr = 0;
+                       vsi->nb_msix = 1;
+               } else {
+                       ret = i40e_res_pool_alloc(&pf->msix_pool,
+                                                 RTE_MIN(vsi->nb_qps,
+                                                    RTE_MAX_RXTX_INTR_VEC_ID));
+                       if (ret < 0) {
+                               PMD_DRV_LOG(ERR,
+                                           "VSI MAIN %d get heap failed %d",
+                                           vsi->seid, ret);
+                               goto fail_queue_alloc;
+                       }
+                       vsi->msix_intr = ret;
+                       vsi->nb_msix = RTE_MIN(vsi->nb_qps,
+                                              RTE_MAX_RXTX_INTR_VEC_ID);
                }
                }
-               vsi->msix_intr = ret;
-               vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
        } else if (type != I40E_VSI_SRIOV) {
                ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
                if (ret < 0) {
        } else if (type != I40E_VSI_SRIOV) {
                ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
                if (ret < 0) {
@@ -4888,11 +5008,11 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev)
        int mask = 0;
 
        /* Apply vlan offload setting */
        int mask = 0;
 
        /* Apply vlan offload setting */
-       mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+       mask = ETH_VLAN_STRIP_MASK |
+              ETH_VLAN_FILTER_MASK |
+              ETH_VLAN_EXTEND_MASK;
        i40e_vlan_offload_set(dev, mask);
 
        i40e_vlan_offload_set(dev, mask);
 
-       /* Apply double-vlan setting, not implemented yet */
-
        /* Apply pvid setting */
        ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
                                data->dev_conf.txmode.hw_vlan_insert_pvid);
        /* Apply pvid setting */
        ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
                                data->dev_conf.txmode.hw_vlan_insert_pvid);
@@ -5446,7 +5566,8 @@ void
 i40e_pf_disable_irq0(struct i40e_hw *hw)
 {
        /* Disable all interrupt types */
 i40e_pf_disable_irq0(struct i40e_hw *hw)
 {
        /* Disable all interrupt types */
-       I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+       I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+                      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
        I40E_WRITE_FLUSH(hw);
 }
 
@@ -6507,7 +6628,7 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
                        uint8_t add)
 {
        uint16_t ip_type;
-       uint32_t ipv4_addr;
+       uint32_t ipv4_addr, ipv4_addr_le;
        uint8_t i, tun_type = 0;
        /* internal varialbe to convert ipv6 byte order */
        uint32_t convert_ipv6[4];
@@ -6534,8 +6655,9 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
        if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
                ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
                ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
+               ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
                rte_memcpy(&pfilter->ipaddr.v4.data,
-                               &rte_cpu_to_le_32(ipv4_addr),
+                               &ipv4_addr_le,
                                sizeof(pfilter->ipaddr.v4.data));
        } else {
                ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
@@ -6855,9 +6977,15 @@ i40e_tunnel_filter_param_check(struct i40e_pf *pf,
 static int
 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
 {
+       struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
        uint32_t val, reg;
        int ret = -EINVAL;
 
+       if (pf->support_multi_driver) {
+               PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
+               return -ENOTSUP;
+       }
+
        val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
        PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x\n", val);
 
@@ -6875,6 +7003,10 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
                                                   reg, NULL);
                if (ret != 0)
                        return ret;
+               PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
+                           "with value 0x%08x",
+                           I40E_GL_PRS_FVBM(2), reg);
+               i40e_global_cfg_warning(I40E_WARNING_GRE_KEY_LEN);
        } else {
                ret = 0;
        }
@@ -7095,12 +7227,18 @@ static int
 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
                                   struct rte_eth_hash_global_conf *g_cfg)
 {
+       struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
        int ret;
        uint16_t i;
        uint32_t reg;
        uint32_t mask0 = g_cfg->valid_bit_mask[0];
        enum i40e_filter_pctype pctype;
 
+       if (pf->support_multi_driver) {
+               PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
+               return -ENOTSUP;
+       }
+
        /* Check the input parameters */
        ret = i40e_hash_global_config_check(g_cfg);
        if (ret < 0)
@@ -7118,42 +7256,45 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
                                I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
                if (hw->mac.type == I40E_MAC_X722) {
                        if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
-                               i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+                               i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
                                  I40E_FILTER_PCTYPE_NONF_IPV4_UDP), reg);
-                               i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+                               i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
                                  I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP),
                                  reg);
-                               i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+                               i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
                                  I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP),
                                  reg);
                        } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
-                               i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+                               i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
                                  I40E_FILTER_PCTYPE_NONF_IPV4_TCP), reg);
-                               i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+                               i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
                                  I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK),
                                  reg);
                        } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
-                               i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+                               i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
                                  I40E_FILTER_PCTYPE_NONF_IPV6_UDP), reg);
-                               i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+                               i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
                                  I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP),
                                  reg);
-                               i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+                               i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
                                  I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP),
                                  reg);
                        } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
-                               i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+                               i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
                                  I40E_FILTER_PCTYPE_NONF_IPV6_TCP), reg);
-                               i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+                               i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(
                                  I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK),
                                  reg);
                        } else {
-                               i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype),
-                                 reg);
+                               i40e_write_global_rx_ctl(hw,
+                                                        I40E_GLQF_HSYM(pctype),
+                                                        reg);
                        }
                } else {
-                       i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
+                       i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(pctype),
+                                                reg);
                }
+               i40e_global_cfg_warning(I40E_WARNING_HSYM);
        }
 
        reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
@@ -7177,7 +7318,8 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
                /* Use the default, and keep it as it is */
                goto out;
 
-       i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg);
+       i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
+       i40e_global_cfg_warning(I40E_WARNING_QF_CTL);
 
 out:
        I40E_WRITE_FLUSH(hw);
@@ -7790,6 +7932,18 @@ i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
                    (uint32_t)i40e_read_rx_ctl(hw, addr));
 }
 
+static void
+i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
+{
+       uint32_t reg = i40e_read_rx_ctl(hw, addr);
+
+       PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
+       if (reg != val)
+               i40e_write_global_rx_ctl(hw, addr, val);
+       PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
+                   (uint32_t)i40e_read_rx_ctl(hw, addr));
+}
+
 static void
 i40e_filter_input_set_init(struct i40e_pf *pf)
 {
@@ -7815,6 +7969,12 @@ i40e_filter_input_set_init(struct i40e_pf *pf)
                                                   I40E_INSET_MASK_NUM_REG);
                if (num < 0)
                        return;
+
+               if (pf->support_multi_driver && num > 0) {
+                       PMD_DRV_LOG(ERR, "Input set setting is not supported.");
+                       return;
+               }
+
                inset_reg = i40e_translate_input_set_reg(hw->mac.type,
                                        input_set);
 
@@ -7823,31 +7983,49 @@ i40e_filter_input_set_init(struct i40e_pf *pf)
                i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
                                     (uint32_t)((inset_reg >>
                                     I40E_32_BIT_WIDTH) & UINT32_MAX));
-               i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
-                                     (uint32_t)(inset_reg & UINT32_MAX));
-               i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
-                                    (uint32_t)((inset_reg >>
-                                    I40E_32_BIT_WIDTH) & UINT32_MAX));
-
-               for (i = 0; i < num; i++) {
-                       i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
-                                            mask_reg[i]);
-                       i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
-                                            mask_reg[i]);
-               }
-               /*clear unused mask registers of the pctype */
-               for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
-                       i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
-                                            0);
-                       i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
-                                            0);
+               if (!pf->support_multi_driver) {
+                       i40e_check_write_global_reg(hw,
+                                           I40E_GLQF_HASH_INSET(0, pctype),
+                                           (uint32_t)(inset_reg & UINT32_MAX));
+                       i40e_check_write_global_reg(hw,
+                                           I40E_GLQF_HASH_INSET(1, pctype),
+                                           (uint32_t)((inset_reg >>
+                                           I40E_32_BIT_WIDTH) & UINT32_MAX));
+
+                       for (i = 0; i < num; i++) {
+                               i40e_check_write_global_reg(hw,
+                                                   I40E_GLQF_FD_MSK(i, pctype),
+                                                   mask_reg[i]);
+                               i40e_check_write_global_reg(hw,
+                                                 I40E_GLQF_HASH_MSK(i, pctype),
+                                                 mask_reg[i]);
+                       }
+                       /*clear unused mask registers of the pctype */
+                       for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
+                               i40e_check_write_global_reg(hw,
+                                                   I40E_GLQF_FD_MSK(i, pctype),
+                                                   0);
+                               i40e_check_write_global_reg(hw,
+                                                 I40E_GLQF_HASH_MSK(i, pctype),
+                                                   0);
+                       }
+               } else {
+                       PMD_DRV_LOG(ERR,
+                                   "Input set setting is not supported.");
                }
                I40E_WRITE_FLUSH(hw);
 
                /* store the default input set */
-               pf->hash_input_set[pctype] = input_set;
+               if (!pf->support_multi_driver)
+                       pf->hash_input_set[pctype] = input_set;
                pf->fdir.input_set[pctype] = input_set;
        }
+
+       if (!pf->support_multi_driver) {
+               i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
+               i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
+               i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
+       }
 }
 
 int
@@ -7860,6 +8038,11 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
        uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
        int ret, i, num;
 
+       if (pf->support_multi_driver) {
+               PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
+               return -ENOTSUP;
+       }
+
        if (!conf) {
                PMD_DRV_LOG(ERR, "Invalid pointer");
                return -EFAULT;
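With the guard above, hash input-set selection fails fast with -ENOTSUP in multi-driver mode instead of silently rewriting device-global registers. From an application this surfaces through the filter-ctrl path; a hedged sketch using the standard 16.11 API (the error handling is illustrative):

        struct rte_eth_hash_filter_info info = {
                .info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT,
        };
        /* fill info.info.input_set_conf for the desired flow type ... */
        int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                                          RTE_ETH_FILTER_SET, &info);
        if (ret == -ENOTSUP)
                printf("hash input set is a global resource; unchanged\n");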
@@ -7908,19 +8091,21 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
 
        inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);

-       i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
-                             (uint32_t)(inset_reg & UINT32_MAX));
-       i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
-                            (uint32_t)((inset_reg >>
-                            I40E_32_BIT_WIDTH) & UINT32_MAX));
+       i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
+                                   (uint32_t)(inset_reg & UINT32_MAX));
+       i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
+                                   (uint32_t)((inset_reg >>
+                                   I40E_32_BIT_WIDTH) & UINT32_MAX));
+       i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
 
        for (i = 0; i < num; i++)
-               i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
-                                    mask_reg[i]);
+               i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+                                           mask_reg[i]);
        /*clear unused mask registers of the pctype */
        for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
-               i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
-                                    0);
+               i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
+                                           0);
+       i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
        I40E_WRITE_FLUSH(hw);
 
        pf->hash_input_set[pctype] = input_set;
@@ -7984,6 +8169,11 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf,
        if (num < 0)
                return -EINVAL;
 
+       if (pf->support_multi_driver && num > 0) {
+               PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
+               return -ENOTSUP;
+       }
+
        inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
 
        i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
@@ -7992,13 +8182,20 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf,
                             (uint32_t)((inset_reg >>
                             I40E_32_BIT_WIDTH) & UINT32_MAX));
 
-       for (i = 0; i < num; i++)
-               i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
-                                    mask_reg[i]);
-       /*clear unused mask registers of the pctype */
-       for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
-               i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
-                                    0);
+       if (!pf->support_multi_driver) {
+               for (i = 0; i < num; i++)
+                       i40e_check_write_global_reg(hw,
+                                                   I40E_GLQF_FD_MSK(i, pctype),
+                                                   mask_reg[i]);
+               /*clear unused mask registers of the pctype */
+               for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
+                       i40e_check_write_global_reg(hw,
+                                                   I40E_GLQF_FD_MSK(i, pctype),
+                                                   0);
+               i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
+       } else {
+               PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
+       }
        I40E_WRITE_FLUSH(hw);
 
        pf->fdir.input_set[pctype] = input_set;
@@ -9694,27 +9891,21 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint16_t interval =
-               i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
        uint16_t msix_intr;
 
        msix_intr = intr_handle->intr_vec[queue_id];
        if (msix_intr == I40E_MISC_VEC_ID)
                I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
-                              I40E_PFINT_DYN_CTLN_INTENA_MASK |
-                              I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
-                              (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
-                              (interval <<
-                               I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+                              I40E_PFINT_DYN_CTL0_INTENA_MASK |
+                              I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+                              I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
        else
                I40E_WRITE_REG(hw,
                               I40E_PFINT_DYN_CTLN(msix_intr -
                                                   I40E_RX_VEC_START),
                               I40E_PFINT_DYN_CTLN_INTENA_MASK |
                               I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
-                              (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
-                              (interval <<
-                               I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
+                              I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
 
        I40E_WRITE_FLUSH(hw);
        rte_intr_enable(&dev->pci_dev->intr_handle);
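Both branches now enable the vector with INTENA and CLEARPBA only and write the ITR_INDX field as its full mask, the "no ITR" index, so enabling a queue interrupt no longer reprograms the throttling interval; that is why the local interval computation above could be dropped. Condensed, for the misc-vector branch:

        I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
                       I40E_PFINT_DYN_CTL0_INTENA_MASK |
                       I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
                       I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); /* index 3: none */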
 
@@ -9731,12 +9922,13 @@ i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 
        msix_intr = intr_handle->intr_vec[queue_id];
        if (msix_intr == I40E_MISC_VEC_ID)
-               I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
+               I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
+                              I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
        else
                I40E_WRITE_REG(hw,
                               I40E_PFINT_DYN_CTLN(msix_intr -
                                                   I40E_RX_VEC_START),
-                              0);
+                              I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
        I40E_WRITE_FLUSH(hw);
 
        return 0;
@@ -9832,14 +10024,43 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
                                      struct ether_addr *mac_addr)
 {
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_vsi *vsi = pf->main_vsi;
+       struct i40e_mac_filter_info mac_filter;
+       struct i40e_mac_filter *f;
+       int ret;
 
        if (!is_valid_assigned_ether_addr(mac_addr)) {
                PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
                return;
        }

-       /* Flags: 0x3 updates port address */
-       i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL);
+       TAILQ_FOREACH(f, &vsi->mac_list, next) {
+               if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
+                       break;
+       }
+
+       if (f == NULL) {
+               PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
+               return;
+       }
+
+       mac_filter = f->mac_info;
+       ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
+       if (ret != I40E_SUCCESS) {
+               PMD_DRV_LOG(ERR, "Failed to delete mac filter");
+               return;
+       }
+       memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
+       ret = i40e_vsi_add_mac(vsi, &mac_filter);
+       if (ret != I40E_SUCCESS) {
+               PMD_DRV_LOG(ERR, "Failed to add mac filter");
+               return;
+       }
+       memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
+
+       i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
+                                 mac_addr->addr_bytes, NULL);
 }
 
 static int
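The rewritten i40e_set_default_mac_addr() keeps software state coherent instead of only poking the port register: the VSI MAC filter list and pf->dev_addr are updated alongside the admin-queue write, and the magic 0x3 flag becomes the named I40E_AQC_WRITE_TYPE_LAA_WOL write type. The sequence, condensed (all names from the hunk above):

        /* 1. find the VSI filter entry matching the current pf->dev_addr */
        /* 2. i40e_vsi_delete_mac() the old address                       */
        /* 3. i40e_vsi_add_mac() the new one, mirror it to pf->dev_addr   */
        /* 4. program the port via i40e_aq_mac_address_write(LAA_WOL)     */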
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index f283319..77a4466 100644
        (((vf)->version_major == I40E_VIRTCHNL_VERSION_MAJOR) && \
        ((vf)->version_minor == 1))
 
+static inline void
+I40E_WRITE_GLB_REG(struct i40e_hw *hw, uint32_t reg, uint32_t value)
+{
+       I40E_WRITE_REG(hw, reg, value);
+       PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified "
+                   "with value 0x%08x",
+                   reg, value);
+}
+
 /* index flex payload per layer */
 enum i40e_flxpld_layer_idx {
        I40E_FLXPLD_L2_IDX    = 0,
@@ -477,6 +485,8 @@ struct i40e_pf {
        bool floating_veb; /* The flag to use the floating VEB */
        /* The floating enable flag for the specific VF */
        bool floating_veb_list[I40E_MAX_VF];
+
+       bool support_multi_driver; /* 1 - support multiple driver */
 };
 
 enum pending_msg {
@@ -569,6 +579,22 @@ struct i40e_adapter {
        struct rte_timecounter tx_tstamp_tc;
 };
 
+enum I40E_WARNING_IDX {
+       I40E_WARNING_DIS_FLX_PLD,
+       I40E_WARNING_ENA_FLX_PLD,
+       I40E_WARNING_QINQ_PARSER,
+       I40E_WARNING_QINQ_CLOUD_FILTER,
+       I40E_WARNING_TPID,
+       I40E_WARNING_FLOW_CTL,
+       I40E_WARNING_GRE_KEY_LEN,
+       I40E_WARNING_QF_CTL,
+       I40E_WARNING_HASH_INSET,
+       I40E_WARNING_HSYM,
+       I40E_WARNING_HASH_MSK,
+       I40E_WARNING_FD_MSK,
+       I40E_WARNING_RPL_CLD_FILTER,
+};
+
 int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
 int i40e_vsi_release(struct i40e_vsi *vsi);
 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
@@ -694,15 +720,46 @@ i40e_align_floor(int n)
 }
 
 static inline uint16_t
-i40e_calc_itr_interval(int16_t interval)
+i40e_calc_itr_interval(int16_t interval, bool is_multi_drv)
 {
-       if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
-               interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+       if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX) {
+               if (is_multi_drv)
+                       interval = I40E_QUEUE_ITR_INTERVAL_MAX;
+               else
+                       interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+       }
 
        /* Convert to hardware count, as writing each 1 represents 2 us */
        return interval / 2;
 }
 
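Each hardware count represents 2 us, so the helper halves the requested interval; the new is_multi_drv flag only changes how an out-of-range request is clamped. A worked example, assuming the constants defined in this header:

        uint16_t n = i40e_calc_itr_interval(16, false); /* 16 us -> count 8 */
        /* out of range in multi-driver mode clamps to the maximum rather
         * than the default: m == I40E_QUEUE_ITR_INTERVAL_MAX / 2 */
        uint16_t m = i40e_calc_itr_interval(-1, true);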
+static inline void
+i40e_global_cfg_warning(enum I40E_WARNING_IDX idx)
+{
+       const char *warning;
+       static const char *const warning_list[] = {
+               [I40E_WARNING_DIS_FLX_PLD] = "disable FDIR flexible payload",
+               [I40E_WARNING_ENA_FLX_PLD] = "enable FDIR flexible payload",
+               [I40E_WARNING_QINQ_PARSER] = "support QinQ parser",
+               [I40E_WARNING_QINQ_CLOUD_FILTER] = "support QinQ cloud filter",
+               [I40E_WARNING_TPID] = "support TPID configuration",
+               [I40E_WARNING_FLOW_CTL] = "configure water marker",
+               [I40E_WARNING_GRE_KEY_LEN] = "support GRE key length setting",
+               [I40E_WARNING_QF_CTL] = "support hash function setting",
+               [I40E_WARNING_HASH_INSET] = "configure hash input set",
+               [I40E_WARNING_HSYM] = "set symmetric hash",
+               [I40E_WARNING_HASH_MSK] = "configure hash mask",
+               [I40E_WARNING_FD_MSK] = "configure fdir mask",
+               [I40E_WARNING_RPL_CLD_FILTER] = "replace cloud filter",
+       };
+
+       warning = warning_list[idx];
+
+       RTE_LOG(WARNING, PMD,
+               "Global register is changed during %s\n",
+               warning);
+}
+
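The warning table uses designated initializers keyed by the enum added above, so each call site stays a one-liner. The pairing applied throughout this patch is a global-register write immediately followed by the matching warning, e.g. (taken from the i40e_ethdev.c hunks):

        i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
        i40e_global_cfg_warning(I40E_WARNING_QF_CTL);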
 #define I40E_VALID_FLOW(flow_type) \
        ((flow_type) == RTE_ETH_FLOW_FRAG_IPV4 || \
        (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 1686914..b19224d 100644
@@ -1035,14 +1035,16 @@ i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 static void
 i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
 {
+       int ret;
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_eth_stats *pstats = NULL;
 
        /* read stat values to clear hardware registers */
-       i40evf_update_stats(dev, &pstats);
+       ret = i40evf_update_stats(dev, &pstats);
 
        /* set stats offset based on current values */
-       vf->vsi.eth_stats_offset = *pstats;
+       if (ret == 0)
+               vf->vsi.eth_stats_offset = *pstats;
 }
 
 static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
@@ -1246,7 +1248,7 @@ i40evf_init_vf(struct rte_eth_dev *dev)
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct ether_addr *p_mac_addr;
        uint16_t interval =
-               i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX);
+               i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX, 0);
 
        vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        vf->dev_data = dev->data;
@@ -1986,7 +1988,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t interval =
-               i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
+               i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0);
        uint16_t msix_intr;
 
        msix_intr = intr_handle->intr_vec[queue_id];
@@ -2113,7 +2115,8 @@ i40evf_dev_start(struct rte_eth_dev *dev)
                                        dev->data->nb_tx_queues);
 
        /* check and configure queue intr-vector mapping */
-       if (dev->data->dev_conf.intr_conf.rxq != 0) {
+       if (rte_intr_cap_multiple(intr_handle) &&
+           dev->data->dev_conf.intr_conf.rxq) {
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
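The added rte_intr_cap_multiple() test means the VF only requests per-queue event fds when the interrupt handle can actually multiplex vectors; handles without that capability now skip the allocation entirely rather than failing in rte_intr_efd_enable(). The guard, as applied above:

        if (rte_intr_cap_multiple(intr_handle) &&
            dev->data->dev_conf.intr_conf.rxq) {
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
        }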
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index e78610b..e25b8e0 100644
@@ -165,7 +165,6 @@ i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
 
        rte_wmb();
        /* Init the RX tail register. */
-       I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
        I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
 
        return err;
@@ -1011,13 +1010,18 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
                                PMD_DRV_LOG(ERR, "invalid programming status"
                                            " reported, error = %u.", error);
                } else
-                       PMD_DRV_LOG(ERR, "unknown programming status"
+                       PMD_DRV_LOG(INFO, "unknown programming status"
                                    " reported, len = %d, id = %u.", len, id);
                rxdp->wb.qword1.status_error_len = 0;
                rxq->rx_tail++;
                if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
                        rxq->rx_tail = 0;
+               if (rxq->rx_tail == 0)
+                       I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+               else
+                       I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
        }
+
        return ret;
 }
 
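The new write-back returns each consumed programming-status descriptor to hardware; previously the FDIR ring's tail was only written at queue init, so repeated flow-director programming could eventually exhaust the ring, which appears to be the defect this hunk addresses. The tail always trails the software index by one slot:

        uint16_t tail = (rxq->rx_tail == 0) ? rxq->nb_rx_desc - 1
                                            : rxq->rx_tail - 1;
        I40E_PCI_REG_WRITE(rxq->qrx_tail, tail);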
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 86546ca..777ffc2 100644
@@ -2606,6 +2606,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
        rxq->vsi = pf->fdir.fdir_vsi;
 
        rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+       memset(rz->addr, 0, I40E_FDIR_NUM_RX_DESC * sizeof(union i40e_rx_desc));
        rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
 
        /*
diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
new file mode 100644
index 0000000..40d1929
--- /dev/null
+++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
@@ -0,0 +1,654 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2017 IBM Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_rxtx_vec_common.h"
+
+#include <altivec.h>
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+       int i;
+       uint16_t rx_id;
+       volatile union i40e_rx_desc *rxdp;
+
+       struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+       struct rte_mbuf *mb0, *mb1;
+
+       vector unsigned long hdr_room = (vector unsigned long){
+                                               RTE_PKTMBUF_HEADROOM,
+                                               RTE_PKTMBUF_HEADROOM};
+       vector unsigned long dma_addr0, dma_addr1;
+
+       rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+       /* Pull 'n' more MBUFs into the software ring */
+       if (rte_mempool_get_bulk(rxq->mp,
+                                (void *)rxep,
+                                RTE_I40E_RXQ_REARM_THRESH) < 0) {
+               if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+                   rxq->nb_rx_desc) {
+                       dma_addr0 = (vector unsigned long){};
+                       for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+                               rxep[i].mbuf = &rxq->fake_mbuf;
+                               vec_st(dma_addr0, 0,
+                                      (vector unsigned long *)&rxdp[i].read);
+                       }
+               }
+               rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+                       RTE_I40E_RXQ_REARM_THRESH;
+               return;
+       }
+
+       /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+       for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+               vector unsigned long vaddr0, vaddr1;
+               uintptr_t p0, p1;
+
+               mb0 = rxep[0].mbuf;
+               mb1 = rxep[1].mbuf;
+
+                /* Flush mbuf with pkt template.
+                 * Data to be rearmed is 6 bytes long.
+                 * Though, RX will overwrite ol_flags that are coming next
+                 * anyway. So overwrite whole 8 bytes with one load:
+                 * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
+                 */
+               p0 = (uintptr_t)&mb0->rearm_data;
+               *(uint64_t *)p0 = rxq->mbuf_initializer;
+               p1 = (uintptr_t)&mb1->rearm_data;
+               *(uint64_t *)p1 = rxq->mbuf_initializer;
+
+               /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+               vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr);
+               vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr);
+
+               /* convert pa to dma_addr hdr/data */
+               dma_addr0 = vec_mergel(vaddr0, vaddr0);
+               dma_addr1 = vec_mergel(vaddr1, vaddr1);
+
+               /* add headroom to pa values */
+               dma_addr0 = vec_add(dma_addr0, hdr_room);
+               dma_addr1 = vec_add(dma_addr1, hdr_room);
+
+               /* flush desc with pa dma_addr */
+               vec_st(dma_addr0, 0, (vector unsigned long *)&rxdp++->read);
+               vec_st(dma_addr1, 0, (vector unsigned long *)&rxdp++->read);
+       }
+
+       rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+       if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+               rxq->rxrearm_start = 0;
+
+       rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+       rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+                            (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+       /* Update the tail pointer on the NIC */
+       I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
+/* Handling the offload flags (olflags) field takes computation
+ * time when receiving packets. Therefore we provide a flag to disable
+ * the processing of the olflags field when they are not needed. This
+ * gives improved performance, at the cost of losing the offload info
+ * in the received packet
+ */
+#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+
+static inline void
+desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
+{
+       vector unsigned int vlan0, vlan1, rss, l3_l4e;
+
+       /* mask everything except RSS, flow director and VLAN flags
+        * bit2 is for VLAN tag, bit11 for flow director indication
+        * bit13:12 for RSS indication.
+        */
+       const vector unsigned int rss_vlan_msk = (vector unsigned int){
+                       (int32_t)0x1c03804, (int32_t)0x1c03804,
+                       (int32_t)0x1c03804, (int32_t)0x1c03804};
+
+       /* map rss and vlan type to rss hash and vlan flag */
+       const vector unsigned char vlan_flags = (vector unsigned char){
+                       0, 0, 0, 0,
+                       PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
+                       0, 0, 0, 0,
+                       0, 0, 0, 0};
+
+       const vector unsigned char rss_flags = (vector unsigned char){
+                       0, PKT_RX_FDIR, 0, 0,
+                       0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
+                       0, 0, 0, 0,
+                       0, 0, 0, 0};
+
+       const vector unsigned char l3_l4e_flags = (vector unsigned char){
+                       0,
+                       PKT_RX_IP_CKSUM_BAD,
+                       PKT_RX_L4_CKSUM_BAD,
+                       PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+                       PKT_RX_EIP_CKSUM_BAD,
+                       PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+                       PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+                       PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
+                                            | PKT_RX_IP_CKSUM_BAD,
+                       0, 0, 0, 0, 0, 0, 0, 0};
+
+       vlan0 = (vector unsigned int)vec_mergel(descs[0], descs[1]);
+       vlan1 = (vector unsigned int)vec_mergel(descs[2], descs[3]);
+       vlan0 = (vector unsigned int)vec_mergeh(vlan0, vlan1);
+
+       vlan1 = vec_and(vlan0, rss_vlan_msk);
+       vlan0 = (vector unsigned int)vec_perm(vlan_flags,
+                                       (vector unsigned char){},
+                                       *(vector unsigned char *)&vlan1);
+
+       rss = vec_sr(vlan1, (vector unsigned int){11, 11, 11, 11});
+       rss = (vector unsigned int)vec_perm(rss_flags, (vector unsigned char){},
+                                       *(vector unsigned char *)&rss);
+
+       l3_l4e = vec_sr(vlan1, (vector unsigned int){22, 22, 22, 22});
+       l3_l4e = (vector unsigned int)vec_perm(l3_l4e_flags,
+                                       (vector unsigned char){},
+                                       *(vector unsigned char *)&l3_l4e);
+
+       vlan0 = vec_or(vlan0, rss);
+       vlan0 = vec_or(vlan0, l3_l4e);
+
+       rx_pkts[0]->ol_flags = (uint64_t)vlan0[2];
+       rx_pkts[1]->ol_flags = (uint64_t)vlan0[3];
+       rx_pkts[2]->ol_flags = (uint64_t)vlan0[0];
+       rx_pkts[3]->ol_flags = (uint64_t)vlan0[1];
+}
+#else
+#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
+#endif
+
+#define PKTLEN_SHIFT     10
+
+static inline void
+desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
+{
+       vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]);
+       vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]);
+
+       ptype0 = vec_sr(ptype0, (vector unsigned long){30, 30});
+       ptype1 = vec_sr(ptype1, (vector unsigned long){30, 30});
+
+       rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(
+                                       (*(vector unsigned char *)&ptype0)[0]);
+       rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(
+                                       (*(vector unsigned char *)&ptype0)[8]);
+       rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(
+                                       (*(vector unsigned char *)&ptype1)[0]);
+       rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(
+                                       (*(vector unsigned char *)&ptype1)[8]);
+}
+
+ /* Notice:
+  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+  * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+  *   numbers of DD bits
+  */
+static inline uint16_t
+_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+                  uint16_t nb_pkts, uint8_t *split_packet)
+{
+       volatile union i40e_rx_desc *rxdp;
+       struct i40e_rx_entry *sw_ring;
+       uint16_t nb_pkts_recd;
+       int pos;
+       uint64_t var;
+       vector unsigned char shuf_msk;
+
+       vector unsigned short crc_adjust = (vector unsigned short){
+               0, 0,         /* ignore pkt_type field */
+               rxq->crc_len, /* sub crc on pkt_len */
+               0,            /* ignore high-16bits of pkt_len */
+               rxq->crc_len, /* sub crc on data_len */
+               0, 0, 0       /* ignore non-length fields */
+               };
+       vector unsigned long dd_check, eop_check;
+
+       /* nb_pkts shall be less than or equal to RTE_I40E_MAX_RX_BURST */
+       nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
+
+       /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
+       nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
+
+       /* Just the act of getting into the function from the application is
+        * going to cost about 7 cycles
+        */
+       rxdp = rxq->rx_ring + rxq->rx_tail;
+
+       rte_prefetch0(rxdp);
+
+       /* See if we need to rearm the RX queue - gives the prefetch a bit
+        * of time to act
+        */
+       if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+               i40e_rxq_rearm(rxq);
+
+       /* Before we start moving massive data around, check to see if
+        * there is actually a packet available
+        */
+       if (!(rxdp->wb.qword1.status_error_len &
+                       rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+               return 0;
+
+       /* 4 packets DD mask */
+       dd_check = (vector unsigned long){0x0000000100000001ULL,
+                                         0x0000000100000001ULL};
+
+       /* 4 packets EOP mask */
+       eop_check = (vector unsigned long){0x0000000200000002ULL,
+                                          0x0000000200000002ULL};
+
+       /* mask to shuffle from desc. to mbuf */
+       shuf_msk = (vector unsigned char){
+               0xFF, 0xFF,   /* pkt_type set as unknown */
+               0xFF, 0xFF,   /* pkt_type set as unknown */
+               14, 15,       /* octet 15~14, low 16 bits pkt_len */
+               0xFF, 0xFF,   /* skip high 16 bits pkt_len, zero out */
+               14, 15,       /* octet 15~14, 16 bits data_len */
+               2, 3,         /* octet 2~3, low 16 bits vlan_macip */
+               4, 5, 6, 7    /* octet 4~7, 32bits rss */
+               };
+
+       /* Cache is empty -> need to scan the buffer rings, but first move
+        * the next 'n' mbufs into the cache
+        */
+       sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+       /* A. load 4 packet in one loop
+        * [A*. mask out 4 unused dirty field in desc]
+        * B. copy 4 mbuf point from swring to rx_pkts
+        * C. calc the number of DD bits among the 4 packets
+        * [C*. extract the end-of-packet bit, if requested]
+        * D. fill info. from desc to mbuf
+        */
+
+       for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+                       pos += RTE_I40E_DESCS_PER_LOOP,
+                       rxdp += RTE_I40E_DESCS_PER_LOOP) {
+               vector unsigned long descs[RTE_I40E_DESCS_PER_LOOP];
+               vector unsigned char pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+               vector unsigned short staterr, sterr_tmp1, sterr_tmp2;
+               vector unsigned long mbp1, mbp2; /* two mbuf pointer
+                                                 * in one XMM reg.
+                                                 */
+
+               /* B.1 load 1 mbuf point */
+               mbp1 = *(vector unsigned long *)&sw_ring[pos];
+               /* Read desc statuses backwards to avoid race condition */
+               /* A.1 load 4 pkts desc */
+               descs[3] = *(vector unsigned long *)(rxdp + 3);
+               rte_compiler_barrier();
+
+               /* B.2 copy 2 mbuf point into rx_pkts  */
+               *(vector unsigned long *)&rx_pkts[pos] = mbp1;
+
+               /* B.1 load 1 mbuf point */
+               mbp2 = *(vector unsigned long *)&sw_ring[pos + 2];
+
+               descs[2] = *(vector unsigned long *)(rxdp + 2);
+               rte_compiler_barrier();
+               /* B.1 load 2 mbuf point */
+               descs[1] = *(vector unsigned long *)(rxdp + 1);
+               rte_compiler_barrier();
+               descs[0] = *(vector unsigned long *)(rxdp);
+
+               /* B.2 copy 2 mbuf point into rx_pkts  */
+               *(vector unsigned long *)&rx_pkts[pos + 2] =  mbp2;
+
+               if (split_packet) {
+                       rte_mbuf_prefetch_part2(rx_pkts[pos]);
+                       rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+                       rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+                       rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+               }
+
+               /* avoid compiler reorder optimization */
+               rte_compiler_barrier();
+
+               /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
+               const vector unsigned int len3 = vec_sl(
+                       vec_ld(0, (vector unsigned int *)&descs[3]),
+                       (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+               const vector unsigned int len2 = vec_sl(
+                       vec_ld(0, (vector unsigned int *)&descs[2]),
+                       (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+               /* merge the now-aligned packet length fields back in */
+               descs[3] = (vector unsigned long)len3;
+               descs[2] = (vector unsigned long)len2;
+
+               /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+               pkt_mb4 = vec_perm((vector unsigned char)descs[3],
+                                 (vector unsigned char){}, shuf_msk);
+               pkt_mb3 = vec_perm((vector unsigned char)descs[2],
+                                 (vector unsigned char){}, shuf_msk);
+
+               /* C.1 4=>2 filter staterr info only */
+               sterr_tmp2 = vec_mergel((vector unsigned short)descs[3],
+                                       (vector unsigned short)descs[2]);
+               /* C.1 4=>2 filter staterr info only */
+               sterr_tmp1 = vec_mergel((vector unsigned short)descs[1],
+                                       (vector unsigned short)descs[0]);
+               /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+               pkt_mb4 = (vector unsigned char)vec_sub(
+                               (vector unsigned short)pkt_mb4, crc_adjust);
+               pkt_mb3 = (vector unsigned char)vec_sub(
+                               (vector unsigned short)pkt_mb3, crc_adjust);
+
+               /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/
+               const vector unsigned int len1 = vec_sl(
+                       vec_ld(0, (vector unsigned int *)&descs[1]),
+                       (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+               const vector unsigned int len0 = vec_sl(
+                       vec_ld(0, (vector unsigned int *)&descs[0]),
+                       (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+               /* merge the now-aligned packet length fields back in */
+               descs[1] = (vector unsigned long)len1;
+               descs[0] = (vector unsigned long)len0;
+
+               /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+               pkt_mb2 = vec_perm((vector unsigned char)descs[1],
+                                  (vector unsigned char){}, shuf_msk);
+               pkt_mb1 = vec_perm((vector unsigned char)descs[0],
+                                  (vector unsigned char){}, shuf_msk);
+
+               /* C.2 get 4 pkts staterr value  */
+               staterr = (vector unsigned short)vec_mergeh(
+                               sterr_tmp1, sterr_tmp2);
+
+               /* D.3 copy final 3,4 data to rx_pkts */
+               vec_st(pkt_mb4, 0,
+                (vector unsigned char *)&rx_pkts[pos + 3]
+                       ->rx_descriptor_fields1
+               );
+               vec_st(pkt_mb3, 0,
+                (vector unsigned char *)&rx_pkts[pos + 2]
+                       ->rx_descriptor_fields1
+               );
+
+               /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+               pkt_mb2 = (vector unsigned char)vec_sub(
+                               (vector unsigned short)pkt_mb2, crc_adjust);
+               pkt_mb1 = (vector unsigned char)vec_sub(
+                               (vector unsigned short)pkt_mb1, crc_adjust);
+
+               /* C* extract and record EOP bit */
+               if (split_packet) {
+                       vector unsigned char eop_shuf_mask =
+                               (vector unsigned char){
+                                       0xFF, 0xFF, 0xFF, 0xFF,
+                                       0xFF, 0xFF, 0xFF, 0xFF,
+                                       0xFF, 0xFF, 0xFF, 0xFF,
+                                       0x04, 0x0C, 0x00, 0x08
+                               };
+
+                       /* and with mask to extract bits, flipping 1-0 */
+                       vector unsigned char eop_bits = vec_and(
+                               (vector unsigned char)vec_nor(staterr, staterr),
+                               (vector unsigned char)eop_check);
+                       /* the staterr values are not in order, as the count
+                        * of dd bits doesn't care. However, for end of
+                        * packet tracking, we do care, so shuffle. This also
+                        * compresses the 32-bit values to 8-bit
+                        */
+                       eop_bits = vec_perm(eop_bits, (vector unsigned char){},
+                                           eop_shuf_mask);
+                       /* store the resulting 32-bit value */
+                       *split_packet = (vec_ld(0,
+                                        (vector unsigned int *)&eop_bits))[0];
+                       split_packet += RTE_I40E_DESCS_PER_LOOP;
+
+                       /* zero-out next pointers */
+                       rx_pkts[pos]->next = NULL;
+                       rx_pkts[pos + 1]->next = NULL;
+                       rx_pkts[pos + 2]->next = NULL;
+                       rx_pkts[pos + 3]->next = NULL;
+               }
+
+               /* C.3 calc available number of desc */
+               staterr = vec_and(staterr, (vector unsigned short)dd_check);
+
+               /* D.3 copy final 1,2 data to rx_pkts */
+               vec_st(pkt_mb2, 0,
+                (vector unsigned char *)&rx_pkts[pos + 1]
+                       ->rx_descriptor_fields1
+               );
+               vec_st(pkt_mb1, 0,
+                (vector unsigned char *)&rx_pkts[pos]->rx_descriptor_fields1
+               );
+               desc_to_ptype_v(descs, &rx_pkts[pos]);
+               desc_to_olflags_v(descs, &rx_pkts[pos]);
+
+               /* C.4 calc available number of desc */
+               var = __builtin_popcountll((vec_ld(0,
+                       (vector unsigned long *)&staterr)[0]));
+               nb_pkts_recd += var;
+               if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+                       break;
+       }
+
+       /* Update our internal tail pointer */
+       rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+       rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+       rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+       return nb_pkts_recd;
+}
+
+ /* Notice:
+  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+  * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+  *   numbers of DD bits
+  */
+uint16_t
+i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+                  uint16_t nb_pkts)
+{
+       return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+ /* vPMD receive routine that reassembles scattered packets
+  * Notice:
+  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+  * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+  *   numbers of DD bits
+  */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+                            uint16_t nb_pkts)
+{
+       struct i40e_rx_queue *rxq = rx_queue;
+       uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+       /* get some new buffers */
+       uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+                       split_flags);
+       if (nb_bufs == 0)
+               return 0;
+
+       /* happy day case, full burst + no packets to be joined */
+       const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+       if (rxq->pkt_first_seg == NULL &&
+           split_fl64[0] == 0 && split_fl64[1] == 0 &&
+           split_fl64[2] == 0 && split_fl64[3] == 0)
+               return nb_bufs;
+
+       /* reassemble any packets that need reassembly*/
+       unsigned int i = 0;
+
+       if (!rxq->pkt_first_seg) {
+               /* find the first split flag, and only reassemble then*/
+               while (i < nb_bufs && !split_flags[i])
+                       i++;
+               if (i == nb_bufs)
+                       return nb_bufs;
+       }
+       return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+               &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+       struct rte_mbuf *pkt, uint64_t flags)
+{
+       uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+               ((uint64_t)flags  << I40E_TXD_QW1_CMD_SHIFT) |
+               ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+       vector unsigned long descriptor = (vector unsigned long){
+               pkt->buf_physaddr + pkt->data_off, high_qw};
+       *(vector unsigned long *)txdp = descriptor;
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+       struct rte_mbuf **pkt, uint16_t nb_pkts,  uint64_t flags)
+{
+       int i;
+
+       for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+               vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+                  uint16_t nb_pkts)
+{
+       struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+       volatile struct i40e_tx_desc *txdp;
+       struct i40e_tx_entry *txep;
+       uint16_t n, nb_commit, tx_id;
+       uint64_t flags = I40E_TD_CMD;
+       uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+       int i;
+
+       /* cross rx_thresh boundary is not allowed */
+       nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+       if (txq->nb_tx_free < txq->tx_free_thresh)
+               i40e_tx_free_bufs(txq);
+
+       nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+       nb_commit = nb_pkts;
+       if (unlikely(nb_pkts == 0))
+               return 0;
+
+       tx_id = txq->tx_tail;
+       txdp = &txq->tx_ring[tx_id];
+       txep = &txq->sw_ring[tx_id];
+
+       txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+       n = (uint16_t)(txq->nb_tx_desc - tx_id);
+       if (nb_commit >= n) {
+               tx_backlog_entry(txep, tx_pkts, n);
+
+               for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+                       vtx1(txdp, *tx_pkts, flags);
+
+               vtx1(txdp, *tx_pkts++, rs);
+
+               nb_commit = (uint16_t)(nb_commit - n);
+
+               tx_id = 0;
+               txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+               /* avoid reach the end of ring */
+               txdp = &txq->tx_ring[tx_id];
+               txep = &txq->sw_ring[tx_id];
+       }
+
+       tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+       vtx(txdp, tx_pkts, nb_commit, flags);
+
+       tx_id = (uint16_t)(tx_id + nb_commit);
+       if (tx_id > txq->tx_next_rs) {
+               txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+                       rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+                                               I40E_TXD_QW1_CMD_SHIFT);
+               txq->tx_next_rs =
+                       (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+       }
+
+       txq->tx_tail = tx_id;
+
+       I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+       return nb_pkts;
+}
+
+void __attribute__((cold))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+       _i40e_rx_queue_release_mbufs_vec(rxq);
+}
+
+int __attribute__((cold))
+i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
+{
+       return i40e_rxq_vec_setup_default(rxq);
+}
+
+int __attribute__((cold))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused * txq)
+{
+       return 0;
+}
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+       return i40e_rx_vec_dev_conf_condition_check_default(dev);
+}
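That closes the new Altivec data path, which mirrors the existing SSE and NEON vector PMDs. Callers must respect the burst constraints stated in the Notice comments; a minimal sketch, with names from this file and rxq standing for whatever queue pointer the PMD dispatches:

        struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST];
        uint16_t n = i40e_recv_pkts_vec(rxq, pkts, RTE_I40E_VPMD_RX_BURST);
        /* n == 0 both when the ring is empty and when fewer than
         * RTE_I40E_DESCS_PER_LOOP packets are ready */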
diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c
index 832242e..9543fe1 100644
@@ -87,6 +87,9 @@ void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
                mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
                mac->ops.set_rate_select_speed =
                                               ixgbe_set_hard_rate_select_speed;
+               if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
+                       mac->ops.set_rate_select_speed =
+                                              ixgbe_set_soft_rate_select_speed;
        } else {
                if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
                     (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
@@ -564,6 +567,10 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
        case IXGBE_DEV_ID_82599_QSFP_SF_QP:
                media_type = ixgbe_media_type_fiber_qsfp;
                break;
+       case IXGBE_DEV_ID_82599_BYPASS:
+               media_type = ixgbe_media_type_fiber_fixed;
+               hw->phy.multispeed_fiber = true;
+               break;
        default:
                media_type = ixgbe_media_type_unknown;
                break;
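Together with the first hunk, this maps the 82599 bypass adapter onto the new fixed-fiber media type and marks it multispeed, which routes link setup through the soft rate-select routine; the ixgbe_common.c hunks below add the same media type to the flow-control and multispeed-fiber switches. Net effect, condensed from this file:

        if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed)
                mac->ops.set_rate_select_speed =
                                        ixgbe_set_soft_rate_select_speed;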
diff --git a/drivers/net/ixgbe/base/ixgbe_api.c b/drivers/net/ixgbe/base/ixgbe_api.c
index 094ee52..368bf7d 100644
@@ -178,6 +178,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
        case IXGBE_DEV_ID_82599EN_SFP:
        case IXGBE_DEV_ID_82599_CX4:
        case IXGBE_DEV_ID_82599_LS:
+       case IXGBE_DEV_ID_82599_BYPASS:
        case IXGBE_DEV_ID_82599_T3_LOM:
                hw->mac.type = ixgbe_mac_82599EB;
                break;
@@ -192,6 +193,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
                break;
        case IXGBE_DEV_ID_X540T:
        case IXGBE_DEV_ID_X540T1:
+       case IXGBE_DEV_ID_X540_BYPASS:
                hw->mac.type = ixgbe_mac_X540;
                hw->mvals = ixgbe_mvals_X540;
                break;
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c
index cca19ef..e54fd30 100644
@@ -166,6 +166,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
        DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
 
        switch (hw->phy.media_type) {
+       case ixgbe_media_type_fiber_fixed:
        case ixgbe_media_type_fiber_qsfp:
        case ixgbe_media_type_fiber:
                /* flow control autoneg black list */
@@ -196,6 +197,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
                case IXGBE_DEV_ID_82599_T3_LOM:
                case IXGBE_DEV_ID_X540T:
                case IXGBE_DEV_ID_X540T1:
+               case IXGBE_DEV_ID_X540_BYPASS:
                case IXGBE_DEV_ID_X550T:
                case IXGBE_DEV_ID_X550T1:
                case IXGBE_DEV_ID_X550EM_X_10G_T:
@@ -261,6 +263,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
                        goto out;
 
                /* only backplane uses autoc so fall through */
+       case ixgbe_media_type_fiber_fixed:
        case ixgbe_media_type_fiber_qsfp:
        case ixgbe_media_type_fiber:
                reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -3068,6 +3071,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
 
        switch (hw->phy.media_type) {
        /* Autoneg flow control on fiber adapters */
+       case ixgbe_media_type_fiber_fixed:
        case ixgbe_media_type_fiber_qsfp:
        case ixgbe_media_type_fiber:
                if (speed == IXGBE_LINK_SPEED_1GB_FULL)
@@ -4552,7 +4556,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
        /* first pull in the header so we know the buffer length */
        for (bi = 0; bi < dword_len; bi++) {
                buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
-               IXGBE_LE32_TO_CPUS(&buffer[bi]);
+               IXGBE_LE32_TO_CPUS((uintptr_t)&buffer[bi]);
        }
 
        /* If there is anything in the data position, pull it in */
@@ -4572,7 +4576,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
        /* Pull in the rest of the buffer (bi is where we left off) */
        for (; bi <= dword_len; bi++) {
                buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
-               IXGBE_LE32_TO_CPUS(&buffer[bi]);
+               IXGBE_LE32_TO_CPUS((uintptr_t)&buffer[bi]);
        }
 
 rel_out:
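The two hunks above change only the argument cast: IXGBE_LE32_TO_CPUS() is an OS-abstraction macro, and routing the buffer address through (uintptr_t) silences a pointer-type warning on some toolchains without altering the in-place byte swap. As a rough sketch (an illustrative stand-in, not the driver's actual macro), the conversion each loop iteration performs is:

    #include <stdint.h>

    /* Hypothetical stand-in for IXGBE_LE32_TO_CPUS(): convert one
     * little-endian dword to host order in place. A real version is a
     * no-op on little-endian hosts; the swap is shown unconditionally
     * here only to make the semantics visible. */
    static inline void le32_to_cpus_sketch(uint32_t *p)
    {
            uint32_t v = *p;

            *p = (v >> 24) | ((v >> 8) & 0x0000ff00u) |
                 ((v << 8) & 0x00ff0000u) | (v << 24);
    }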
@@ -5065,6 +5069,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
 
                /* Set the module link speed */
                switch (hw->phy.media_type) {
+               case ixgbe_media_type_fiber_fixed:
                case ixgbe_media_type_fiber:
                        ixgbe_set_rate_select_speed(hw,
                                                    IXGBE_LINK_SPEED_10GB_FULL);
@@ -5115,6 +5120,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
 
                /* Set the module link speed */
                switch (hw->phy.media_type) {
+               case ixgbe_media_type_fiber_fixed:
                case ixgbe_media_type_fiber:
                        ixgbe_set_rate_select_speed(hw,
                                                    IXGBE_LINK_SPEED_1GB_FULL);
index 042e5cc..2785bba 100644 (file)
@@ -444,17 +444,6 @@ STATIC s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
        for (i = 0; i < size; i++)
                IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
 
-       /*
-        * Complete the remaining mailbox data registers with zero to reset
-        * the data sent in a previous exchange (in either side) with the PF,
-        * including exchanges performed by another Guest OS to which that VF
-        * was previously assigned.
-        */
-       while (i < hw->mbx.size) {
-               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, 0);
-               i++;
-       }
-
        /* update stats */
        hw->mbx.stats.msgs_tx++;
 
@@ -693,17 +682,6 @@ STATIC s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
        for (i = 0; i < size; i++)
                IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
 
-       /*
-        * Complete the remaining mailbox data registers with zero to reset
-        * the data sent in a previous exchange (in either side) with the VF,
-        * including exchanges performed by another Guest OS to which that VF
-        * was previously assigned.
-        */
-       while (i < hw->mbx.size) {
-               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, 0);
-               i++;
-       }
-
        /* Interrupt VF to tell it a message has been sent and release buffer*/
        IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
 
index 4982e03..b12573c 100644 (file)
@@ -123,9 +123,11 @@ POSSIBILITY OF SUCH DAMAGE.
 #define IXGBE_DEV_ID_82599_VF                  0x10ED
 #define IXGBE_DEV_ID_82599_VF_HV               0x152E
 #define IXGBE_DEV_ID_82599_LS                  0x154F
+#define IXGBE_DEV_ID_82599_BYPASS              0x155D
 #define IXGBE_DEV_ID_X540T                     0x1528
 #define IXGBE_DEV_ID_X540_VF                   0x1515
 #define IXGBE_DEV_ID_X540_VF_HV                        0x1530
+#define IXGBE_DEV_ID_X540_BYPASS               0x155C
 #define IXGBE_DEV_ID_X540T1                    0x1560
 #define IXGBE_DEV_ID_X550T                     0x1563
 #define IXGBE_DEV_ID_X550T1                    0x15D1
@@ -270,7 +272,6 @@ POSSIBILITY OF SUCH DAMAGE.
 #define IXGBE_I2C_BB_EN_X550           0x00000100
 #define IXGBE_I2C_BB_EN_X550EM_x       IXGBE_I2C_BB_EN_X550
 #define IXGBE_I2C_BB_EN_X550EM_a       IXGBE_I2C_BB_EN_X550
-
 #define IXGBE_I2C_BB_EN_BY_MAC(_hw)    IXGBE_BY_MAC((_hw), I2C_BB_EN)
 
 #define IXGBE_I2C_CLK_OE_N_EN          0
@@ -3626,6 +3627,7 @@ enum ixgbe_sfp_type {
 enum ixgbe_media_type {
        ixgbe_media_type_unknown = 0,
        ixgbe_media_type_fiber,
+       ixgbe_media_type_fiber_fixed,
        ixgbe_media_type_fiber_qsfp,
        ixgbe_media_type_fiber_lco,
        ixgbe_media_type_copper,
index 73996bb..2dc69ff 100644 (file)
@@ -93,6 +93,9 @@
 /* Timer value included in XOFF frames. */
 #define IXGBE_FC_PAUSE 0x680
 
+/* Default value of max Rx queues */
+#define IXGBE_MAX_RX_QUEUE_NUM 128
+
 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
@@ -249,6 +252,8 @@ static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
 static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
+static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
+                                  int wait_to_complete);
 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
@@ -619,7 +624,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .dev_configure        = ixgbevf_dev_configure,
        .dev_start            = ixgbevf_dev_start,
        .dev_stop             = ixgbevf_dev_stop,
-       .link_update          = ixgbe_dev_link_update,
+       .link_update          = ixgbevf_dev_link_update,
        .stats_get            = ixgbevf_dev_stats_get,
        .xstats_get           = ixgbevf_dev_xstats_get,
        .stats_reset          = ixgbevf_dev_stats_reset,
@@ -1959,9 +1964,10 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
                return -EINVAL;
        }
 
-       RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
-       RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
-
+       RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
+               IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+       RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
+               dev->pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
        return 0;
 }
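The replacement arithmetic above sizes each SR-IOV pool from the hardware queue count rather than trusting the requested nb_rx_q. A worked example with illustrative pool counts (standing in for RTE_ETH_DEV_SRIOV(dev).active):

    #include <stdio.h>

    #define IXGBE_MAX_RX_QUEUE_NUM 128  /* as defined earlier in this patch */

    int main(void)
    {
            unsigned int active[] = { 16, 32, 64 };  /* illustrative values */
            unsigned int i;

            for (i = 0; i < 3; i++)
                    /* e.g. 128 queues / 32 active pools = 4 queues per pool */
                    printf("active=%u -> nb_q_per_pool=%u\n",
                           active[i], IXGBE_MAX_RX_QUEUE_NUM / active[i]);
            return 0;
    }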
 
@@ -2001,8 +2007,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
                case ETH_MQ_RX_NONE:
                        /* if nothing mq mode configure, use default scheme */
                        dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
-                       if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
-                               RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
                        break;
                default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
                        /* SRIOV only works in VMDq enable mode */
@@ -3217,15 +3221,123 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
        dev_info->tx_desc_lim = tx_desc_lim;
 }
 
+static int
+ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                  int *link_up, int wait_to_complete)
+{
+       /*
+        * For a quick link status check (wait_to_complete == 0),
+        * skip the PF link status check.
+        */
+       bool no_pflink_check = wait_to_complete == 0;
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       struct ixgbe_mac_info *mac = &hw->mac;
+       uint32_t links_reg, in_msg;
+       int ret_val = 0;
+
+       /* If we were hit with a reset drop the link */
+       if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+               mac->get_link_status = true;
+
+       if (!mac->get_link_status)
+               goto out;
+
+       /* if link status is down no point in checking to see if pf is up */
+       links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+       if (!(links_reg & IXGBE_LINKS_UP))
+               goto out;
+
+       /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+        * before the link status is correct
+        */
+       if (mac->type == ixgbe_mac_82599_vf) {
+               int i;
+
+               for (i = 0; i < 5; i++) {
+                       rte_delay_us(100);
+                       links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+                       if (!(links_reg & IXGBE_LINKS_UP))
+                               goto out;
+               }
+       }
+
+       switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+       case IXGBE_LINKS_SPEED_10G_82599:
+               *speed = IXGBE_LINK_SPEED_10GB_FULL;
+               if (hw->mac.type >= ixgbe_mac_X550) {
+                       if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+                               *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+               }
+               break;
+       case IXGBE_LINKS_SPEED_1G_82599:
+               *speed = IXGBE_LINK_SPEED_1GB_FULL;
+               break;
+       case IXGBE_LINKS_SPEED_100_82599:
+               *speed = IXGBE_LINK_SPEED_100_FULL;
+               if (hw->mac.type == ixgbe_mac_X550) {
+                       if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+                               *speed = IXGBE_LINK_SPEED_5GB_FULL;
+               }
+               break;
+       case IXGBE_LINKS_SPEED_10_X550EM_A:
+               *speed = IXGBE_LINK_SPEED_UNKNOWN;
+               /* Reserved in older MACs */
+               if (hw->mac.type >= ixgbe_mac_X550)
+                       *speed = IXGBE_LINK_SPEED_10_FULL;
+               break;
+       default:
+               *speed = IXGBE_LINK_SPEED_UNKNOWN;
+       }
+
+       if (no_pflink_check) {
+               if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
+                       mac->get_link_status = true;
+               else
+                       mac->get_link_status = false;
+
+               goto out;
+       }
+       /* if the read failed it could just be a mailbox collision, best wait
+        * until we are called again and don't report an error
+        */
+       if (mbx->ops.read(hw, &in_msg, 1, 0))
+               goto out;
+
+       if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+               /* msg is not CTS; if it is a NACK, we lost CTS status */
+               if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+                       ret_val = -1;
+               goto out;
+       }
+
+       /* the PF is talking; if we timed out in the past, we reinit */
+       if (!mbx->timeout) {
+               ret_val = -1;
+               goto out;
+       }
+
+       /* if we passed all the tests above then the link is up and we no
+        * longer need to check for link
+        */
+       mac->get_link_status = false;
+
+out:
+       *link_up = !mac->get_link_status;
+       return ret_val;
+}
+
 /* return 0 means link status changed, -1 means not changed */
 static int
-ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
+                           int wait_to_complete, int vf)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_link link, old;
        ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
        int link_up;
        int diag;
+       int wait = 1;
 
        link.link_status = ETH_LINK_DOWN;
        link.link_speed = 0;
@@ -3238,9 +3350,12 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 
        /* check if it needs to wait to complete, if lsc interrupt is enabled */
        if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
-               diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
+               wait = 0;
+
+       if (vf)
+               diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
        else
-               diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
+               diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
 
        if (diag != 0) {
                link.link_speed = ETH_SPEED_NUM_100M;
@@ -3287,6 +3402,18 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
        return 0;
 }
 
+static int
+ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+       return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
+}
+
+static int
+ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+       return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
+}
+
 static void
 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
 {
@@ -4206,7 +4333,11 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
-       hw->mac.ops.reset_hw(hw);
+       err = hw->mac.ops.reset_hw(hw);
+       if (err) {
+               PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
+               return err;
+       }
        hw->mac.get_link_status = true;
 
        /* negotiate mailbox API version to use with the PF. */
@@ -4233,7 +4364,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
        ixgbevf_dev_rxtx_start(dev);
 
        /* check and configure queue intr-vector mapping */
-       if (dev->data->dev_conf.intr_conf.rxq != 0) {
+       if (rte_intr_cap_multiple(intr_handle) &&
+           dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
@@ -7569,12 +7701,17 @@ static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        u32 in_msg = 0;
 
-       if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
-               return;
+       /* peek the message first */
+       in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM);
 
        /* PF reset VF event */
-       if (in_msg == IXGBE_PF_CONTROL_MSG)
-               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
+       if (in_msg == IXGBE_PF_CONTROL_MSG) {
+               /* dummy mbx read to ack pf */
+               if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
+                       return;
+               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+                                             NULL);
+       }
 }
 
 static int
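The rework above matters because ixgbe_read_mbx() acknowledges the message to the PF as a side effect. Peeking IXGBE_VFMBMEM first means only the PF reset notification is consumed here; anything else is left unacknowledged for its intended consumer. Condensed to its control flow (names taken from the patch, error handling trimmed):

    static void mbx_process_sketch(struct ixgbe_hw *hw)
    {
            u32 in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM); /* peek only */

            if (in_msg != IXGBE_PF_CONTROL_MSG)
                    return;         /* leave other messages untouched */
            /* a dummy mailbox read acknowledges the PF */
            if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
                    return;
            /* ... then raise RTE_ETH_EVENT_INTR_RESET to the application ... */
    }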
index 79b7a60..0ffb940 100644 (file)
@@ -160,6 +160,22 @@ priv_lock(struct priv *priv)
        rte_spinlock_lock(&priv->lock);
 }
 
+/**
+ * Try to lock private structure to protect it from concurrent access in the
+ * control path.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   1 if the lock is successfully taken; 0 otherwise.
+ */
+static inline int
+priv_trylock(struct priv *priv)
+{
+       return rte_spinlock_trylock(&priv->lock);
+}
+
 /**
  * Unlock private structure.
  *
index 2ea995f..e98959f 100644 (file)
@@ -658,6 +658,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
                               (*priv->rss_conf)[0]->rss_key_len :
                               0);
        info->speed_capa = priv->link_speed_capa;
+       info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
        priv_unlock(priv);
 }
 
@@ -985,9 +986,7 @@ recover:
                /* Provide new values to rxq_setup(). */
                dev->data->dev_conf.rxmode.jumbo_frame = sp;
                dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
-               if (rehash)
-                       ret = rxq_rehash(dev, rxq_ctrl);
-               else
+               if (!rehash)
                        ret = rxq_ctrl_setup(dev, rxq_ctrl, 1 << rxq->elts_n,
                                             rxq_ctrl->socket, NULL, rxq->mp);
                if (!ret)
@@ -1239,8 +1238,12 @@ mlx5_dev_link_status_handler(void *arg)
        struct priv *priv = dev->data->dev_private;
        int ret;
 
-       priv_lock(priv);
-       assert(priv->pending_alarm == 1);
+       while (!priv_trylock(priv)) {
+               /* Alarm is being canceled. */
+               if (priv->pending_alarm == 0)
+                       return;
+               rte_pause();
+       }
        priv->pending_alarm = 0;
        ret = priv_dev_link_status_handler(priv, dev);
        priv_unlock(priv);
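The trylock loop closes a race between this alarm callback and rte_eal_alarm_cancel() in the uninstall path below: the canceler clears pending_alarm while holding the same lock, so blocking on priv_lock() here could spin against a handler that is being torn down. Reduced to its essentials (the context struct and field names are illustrative, not the mlx5 types):

    #include <rte_spinlock.h>

    struct handler_ctx {
            rte_spinlock_t lock;
            volatile int pending;   /* nonzero while the alarm is armed */
    };

    static void alarm_handler_sketch(struct handler_ctx *c)
    {
            while (!rte_spinlock_trylock(&c->lock)) {
                    /* The canceler cleared the flag under the lock:
                     * bail out instead of spinning forever. */
                    if (c->pending == 0)
                            return;
                    rte_pause();    /* busy-wait hint, as in the patch */
            }
            c->pending = 0;
            /* ... do the real link-status work under the lock ... */
            rte_spinlock_unlock(&c->lock);
    }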
@@ -1287,9 +1290,10 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
        rte_intr_callback_unregister(&priv->intr_handle,
                                     mlx5_dev_interrupt_handler,
                                     dev);
-       if (priv->pending_alarm)
+       if (priv->pending_alarm) {
+               priv->pending_alarm = 0;
                rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
-       priv->pending_alarm = 0;
+       }
        priv->intr_handle.fd = 0;
        priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
 }
index 7445320..1f42dac 100644 (file)
@@ -517,12 +517,10 @@ nfp_net_configure(struct rte_eth_dev *dev)
                new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
 
        if (rxmode->jumbo_frame)
-               /* this is handled in rte_eth_dev_configure */
+               hw->mtu = rxmode->max_rx_pkt_len;
 
-       if (rxmode->hw_strip_crc) {
-               PMD_INIT_LOG(INFO, "strip CRC not supported\n");
-               return -EINVAL;
-       }
+       if (!rxmode->hw_strip_crc)
+               PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable\n");
 
        if (rxmode->enable_scatter) {
                PMD_INIT_LOG(INFO, "Scatter not supported\n");
@@ -1012,7 +1010,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
        dev_info->min_rx_bufsize = ETHER_MIN_MTU;
-       dev_info->max_rx_pktlen = hw->mtu;
+       dev_info->max_rx_pktlen = hw->max_mtu;
        /* Next should change when PF support is implemented */
        dev_info->max_mac_addrs = 1;
 
@@ -1240,6 +1238,13 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
                return -EINVAL;
 
+       /* mtu setting is forbidden if port is started */
+       if (dev->data->dev_started) {
+               PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
+                           dev->data->port_id);
+               return -EBUSY;
+       }
+
        /* switch to jumbo mode if needed */
        if ((uint32_t)mtu > ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.jumbo_frame = 1;
@@ -2390,7 +2395,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
        hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
        hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
        hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
-       hw->mtu = hw->max_mtu;
+       hw->mtu = ETHER_MTU;
 
        if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
                hw->rx_offset = NFP_NET_RX_OFFSET;
index 836d982..9704895 100644 (file)
@@ -93,7 +93,7 @@ static struct rte_eth_link pmd_link = {
        .link_speed = ETH_SPEED_NUM_10G,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = ETH_LINK_DOWN,
-       .link_autoneg = ETH_LINK_SPEED_AUTONEG,
+       .link_autoneg = ETH_LINK_AUTONEG,
 };
 
 static uint16_t
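This one-line fix, repeated for several virtual PMDs below, untangles two macro families: ETH_LINK_SPEED_AUTONEG belongs to the speed-capability bitmap and expands to 0, so storing it in link_autoneg actually encoded a fixed link. The link_autoneg field takes ETH_LINK_AUTONEG (1) or ETH_LINK_FIXED (0), as in:

    #include <rte_ethdev.h>

    /* Correctly initialized link state for a virtual PMD: note that
     * link_autoneg uses ETH_LINK_AUTONEG (1), not the bitmap value
     * ETH_LINK_SPEED_AUTONEG (0). */
    static struct rte_eth_link example_link = {
            .link_speed   = ETH_SPEED_NUM_10G,
            .link_duplex  = ETH_LINK_FULL_DUPLEX,
            .link_status  = ETH_LINK_DOWN,
            .link_autoneg = ETH_LINK_AUTONEG,
    };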
index f6b3c10..76c131b 100644 (file)
@@ -124,7 +124,7 @@ static struct rte_eth_link pmd_link = {
                .link_speed = ETH_SPEED_NUM_10G,
                .link_duplex = ETH_LINK_FULL_DUPLEX,
                .link_status = ETH_LINK_DOWN,
-               .link_autoneg = ETH_LINK_SPEED_FIXED,
+               .link_autoneg = ETH_LINK_AUTONEG,
 };
 
 static int
@@ -801,7 +801,7 @@ pmd_init_internals(const char *name, const unsigned int nb_rx_queues,
        struct rte_eth_dev_data *data = NULL;
        unsigned int numa_node = rte_socket_id();
 
-       RTE_LOG(INFO, PMD, "Creating pcap-backed ethdev on numa socket %u\n",
+       RTE_LOG(INFO, PMD, "Creating pcap-backed ethdev on numa socket %d\n",
                numa_node);
 
        /* now do all data allocation - for eth_dev structure
@@ -1042,7 +1042,7 @@ pmd_pcap_remove(const char *name)
 {
        struct rte_eth_dev *eth_dev = NULL;
 
-       RTE_LOG(INFO, PMD, "Closing pcap ethdev on numa socket %u\n",
+       RTE_LOG(INFO, PMD, "Closing pcap ethdev on numa socket %d\n",
                        rte_socket_id());
 
        if (name == NULL)
index 8aa3c0b..8e521ec 100644 (file)
@@ -239,10 +239,9 @@ ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
                status = true;
        } else {
                *type = DCBX_MAX_PROTOCOL_TYPE;
-               DP_ERR(p_hwfn,
-                      "No action required, App TLV id = 0x%x"
-                      " app_prio_bitmap = 0x%x\n",
-                      id, app_prio_bitmap);
+               DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+                           "No action required, App TLV entry = 0x%x\n",
+                          app_prio_bitmap);
        }
 
        return status;
index 5ff8f28..de0c587 100644 (file)
@@ -1027,6 +1027,12 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
                if (sge_tpa_params->tpa_gro_consistent_flg)
                        p_sge_tpa_tlv->sge_tpa_flags |=
                            VFPF_TPA_GRO_CONSIST_FLAG;
+               if (sge_tpa_params->tpa_ipv4_tunn_en_flg)
+                       p_sge_tpa_tlv->sge_tpa_flags |=
+                           VFPF_TPA_TUNN_IPV4_EN_FLAG;
+               if (sge_tpa_params->tpa_ipv6_tunn_en_flg)
+                       p_sge_tpa_tlv->sge_tpa_flags |=
+                           VFPF_TPA_TUNN_IPV6_EN_FLAG;
 
                p_sge_tpa_tlv->tpa_max_aggs_num =
                    sge_tpa_params->tpa_max_aggs_num;
index 149d092..cc835d7 100644 (file)
@@ -379,6 +379,8 @@ struct vfpf_vport_update_sge_tpa_tlv {
        #define VFPF_TPA_PKT_SPLIT_FLAG      (1 << 2)
        #define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3)
        #define VFPF_TPA_GRO_CONSIST_FLAG    (1 << 4)
+       #define VFPF_TPA_TUNN_IPV4_EN_FLAG   (1 << 5)
+       #define VFPF_TPA_TUNN_IPV6_EN_FLAG   (1 << 6)
 
        u8                      update_sge_tpa_flags;
        #define VFPF_UPDATE_SGE_DEPRECATED_FLAG    (1 << 0)
index 9d782ac..5275ef9 100644 (file)
@@ -282,6 +282,67 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
        return 0;
 }
 
+static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
+{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+#endif
+       unsigned int i = 0, j = 0, qid;
+       unsigned int rxq_stat_cntrs, txq_stat_cntrs;
+       struct qede_tx_queue *txq;
+
+       DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
+
+       rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+                              RTE_ETHDEV_QUEUE_STAT_CNTRS);
+       txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+                              RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
+       for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+               if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+                       OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+                           offsetof(struct qede_rx_queue, rcv_pkts), 0,
+                           sizeof(uint64_t));
+                       OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+                           offsetof(struct qede_rx_queue, rx_hw_errors), 0,
+                           sizeof(uint64_t));
+                       OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+                           offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
+                           sizeof(uint64_t));
+
+                       if (xstats)
+                               for (j = 0;
+                                    j < RTE_DIM(qede_rxq_xstats_strings); j++)
+                                       OSAL_MEMSET((((char *)
+                                           (qdev->fp_array[qid].rxq)) +
+                                           qede_rxq_xstats_strings[j].offset),
+                                           0,
+                                           sizeof(uint64_t));
+
+                       i++;
+                       if (i == rxq_stat_cntrs)
+                               break;
+               }
+       }
+
+       i = 0;
+
+       for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+               if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
+                       txq = qdev->fp_array[(qid)].txqs[0];
+
+                       OSAL_MEMSET((uint64_t *)(uintptr_t)
+                               (((uint64_t)(uintptr_t)(txq)) +
+                                offsetof(struct qede_tx_queue, xmit_pkts)), 0,
+                           sizeof(uint64_t));
+
+                       i++;
+                       if (i == txq_stat_cntrs)
+                               break;
+               }
+       }
+}
+
 static int
 qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
                  bool add)
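The new qede_reset_queue_stats() clears individual counters by byte offset so the surrounding queue configuration survives the reset. The same technique, reduced to a self-contained toy (the struct layout is illustrative, not the qede one):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct toy_rxq {
            unsigned int nb_desc;   /* configuration: must survive reset */
            uint64_t rcv_pkts;      /* counter: cleared */
            uint64_t rx_hw_errors;  /* counter: cleared */
    };

    /* Zero exactly one uint64_t counter located at the given offset. */
    static void reset_counter(void *q, size_t off)
    {
            memset((char *)q + off, 0, sizeof(uint64_t));
    }

    /* usage: reset_counter(rxq, offsetof(struct toy_rxq, rcv_pkts)); */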
@@ -629,7 +690,7 @@ static int qede_init_vport(struct qede_dev *qdev)
 
        start.remove_inner_vlan = 1;
        start.gro_enable = 0;
-       start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
+       start.mtu = qdev->mtu;
        start.vport_id = 0;
        start.drop_ttl0 = false;
        start.clear_stats = 1;
@@ -674,6 +735,14 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
                }
        }
 
+       /* We need at least one RX queue. There is no minimum check in
+        * rte_eth_dev_configure(), so we check it here.
+        */
+       if (eth_dev->data->nb_rx_queues == 0) {
+               DP_ERR(edev, "Minimum one RX queue is required\n");
+               return -EINVAL;
+       }
+
        /* Sanity checks and throw warnings */
        if (rxmode->enable_scatter == 1)
                eth_dev->data->scattered_rx = 1;
@@ -709,6 +778,14 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
        if (rc != 0)
                return rc;
 
+       /* If jumbo enabled adjust MTU */
+       if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+               eth_dev->data->mtu =
+                       eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+                       ETHER_HDR_LEN - ETHER_CRC_LEN;
+
+       qdev->mtu = eth_dev->data->mtu;
+
        /* Issue VPORT-START with default config values to allow
         * other port configurations early on.
         */
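The jumbo adjustment above derives the MTU back from the configured frame length; for example, a 9018-byte max_rx_pkt_len yields a 9000-byte MTU:

    #include <stdio.h>

    int main(void)
    {
            unsigned int max_rx_pkt_len = 9018; /* illustrative value */
            unsigned int ether_hdr_len = 14, ether_crc_len = 4;

            /* 9018 - 14 - 4 = 9000 */
            printf("mtu = %u\n",
                   max_rx_pkt_len - ether_hdr_len - ether_crc_len);
            return 0;
    }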
@@ -756,8 +833,7 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 
        PMD_INIT_FUNC_TRACE(edev);
 
-       dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
-                                             QEDE_ETH_OVERHEAD);
+       dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
        dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
        dev_info->rx_desc_lim = qede_rx_desc_lim;
        dev_info->tx_desc_lim = qede_tx_desc_lim;
@@ -1115,6 +1191,7 @@ qede_reset_xstats(struct rte_eth_dev *dev)
        struct ecore_dev *edev = &qdev->edev;
 
        ecore_reset_vport_stats(edev);
+       qede_reset_queue_stats(qdev, true);
 }
 
 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
@@ -1150,6 +1227,7 @@ static void qede_reset_stats(struct rte_eth_dev *eth_dev)
        struct ecore_dev *edev = &qdev->edev;
 
        ecore_reset_vport_stats(edev);
+       qede_reset_queue_stats(qdev, false);
 }
 
 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
@@ -1395,32 +1473,76 @@ int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
 
 int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
-       uint32_t frame_size;
-       struct qede_dev *qdev = dev->data->dev_private;
+       struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_dev_info dev_info = {0};
+       struct qede_fastpath *fp;
+       uint32_t max_rx_pkt_len;
+       uint32_t frame_size;
+       uint16_t rx_buf_size;
+       uint16_t bufsz;
+       bool restart = false;
+       int i;
 
+       PMD_INIT_FUNC_TRACE(edev);
+       if (IS_VF(edev))
+               return -ENOTSUP;
        qede_dev_info_get(dev, &dev_info);
-
-       /* VLAN_TAG = 4 */
-       frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;
-
-       if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+       max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+       frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
+       if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
+               DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
+                      mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
+                       ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
                return -EINVAL;
-
+       }
        if (!dev->data->scattered_rx &&
-           frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+           frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+               DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
+                       dev->data->min_rx_buf_size);
                return -EINVAL;
-
-       if (frame_size > ETHER_MAX_LEN)
+       }
+       /* Temporarily replace I/O functions with dummy ones. It cannot
+        * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
+        */
+       dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
+       dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
+       if (dev->data->dev_started) {
+               dev->data->dev_started = 0;
+               qede_dev_stop(dev);
+               restart = true;
+       }
+       rte_delay_ms(1000);
+       qdev->mtu = mtu;
+       /* Fix up RX buf size for all queues of the port */
+       for_each_queue(i) {
+               fp = &qdev->fp_array[i];
+               if ((fp->type & QEDE_FASTPATH_RX) && (fp->rxq != NULL)) {
+                       bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+                               fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+                       if (dev->data->scattered_rx)
+                               rx_buf_size = bufsz + ETHER_HDR_LEN +
+                                             ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
+                       else
+                               rx_buf_size = frame_size;
+                       rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+                       fp->rxq->rx_buf_size = rx_buf_size;
+                       DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
+               }
+       }
+       if (max_rx_pkt_len > ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.jumbo_frame = 1;
        else
                dev->data->dev_conf.rxmode.jumbo_frame = 0;
-
+       if (!dev->data->dev_started && restart) {
+               qede_dev_start(dev);
+               dev->data->dev_started = 1;
+       }
        /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-       qdev->mtu = mtu;
-       qede_dev_stop(dev);
-       qede_dev_start(dev);
+       dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
+       /* Reassign back */
+       dev->rx_pkt_burst = qede_recv_pkts;
+       dev->tx_pkt_burst = qede_xmit_pkts;
 
        return 0;
 }
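The reworked qede_set_mtu() parks the RX/TX burst pointers on a dummy handler while the port is stopped and restarted, because rte_eth_rx_burst() never checks for a NULL callback. A generic sketch of that quiesce pattern (the helper names are hypothetical):

    #include <rte_ethdev.h>
    #include <rte_cycles.h>

    static uint16_t
    burst_stub(void *q __rte_unused, struct rte_mbuf **pkts __rte_unused,
               uint16_t n __rte_unused)
    {
            return 0;       /* accept and deliver nothing while reconfiguring */
    }

    static void
    quiesce_and_reconfigure(struct rte_eth_dev *dev,
                            eth_rx_burst_t real_rx, eth_tx_burst_t real_tx)
    {
            dev->rx_pkt_burst = burst_stub;
            dev->tx_pkt_burst = burst_stub;
            rte_delay_ms(1000);     /* let in-flight burst calls drain */
            /* ... stop queues, resize RX buffers, restart ... */
            dev->rx_pkt_burst = real_rx;    /* reinstate the datapath */
            dev->tx_pkt_burst = real_tx;
    }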
index 9cce13d..e586dc7 100644 (file)
@@ -89,11 +89,11 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 {
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
-       struct rte_eth_dev_data *eth_data = dev->data;
+       struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
        struct qede_rx_queue *rxq;
-       uint16_t pkt_len = (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len;
+       uint16_t max_rx_pkt_len;
+       uint16_t bufsz;
        size_t size;
-       uint16_t data_size;
        int rc;
        int i;
 
@@ -127,34 +127,27 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        rxq->nb_rx_desc = nb_desc;
        rxq->queue_id = queue_idx;
        rxq->port_id = dev->data->port_id;
-
-       /* Sanity check */
-       data_size = (uint16_t)rte_pktmbuf_data_room_size(mp) -
-                               RTE_PKTMBUF_HEADROOM;
-
-       if (pkt_len > data_size && !dev->data->scattered_rx) {
-               DP_ERR(edev, "MTU %u should not exceed dataroom %u\n",
-                      pkt_len, data_size);
-               rte_free(rxq);
-               return -EINVAL;
+       max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
+
+       /* Fix up RX buffer size */
+       bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+       if ((rxmode->enable_scatter)                    ||
+           (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
+               if (!dev->data->scattered_rx) {
+                       DP_INFO(edev, "Forcing scatter-gather mode\n");
+                       dev->data->scattered_rx = 1;
+               }
        }
-
        if (dev->data->scattered_rx)
-               rxq->rx_buf_size = data_size;
+               rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
+                                  ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
        else
-               rxq->rx_buf_size = pkt_len + QEDE_ETH_OVERHEAD;
+               rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
+       /* Align to cache-line size if needed */
+       rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
 
-       qdev->mtu = pkt_len;
-
-       DP_INFO(edev, "MTU = %u ; RX buffer = %u\n",
-               qdev->mtu, rxq->rx_buf_size);
-
-       if (pkt_len > ETHER_MAX_LEN) {
-               dev->data->dev_conf.rxmode.jumbo_frame = 1;
-               DP_NOTICE(edev, false, "jumbo frame enabled\n");
-       } else {
-               dev->data->dev_conf.rxmode.jumbo_frame = 0;
-       }
+       DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
+               qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
 
        /* Allocate the parallel driver ring for Rx buffers */
        size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
@@ -222,7 +215,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        dev->data->rx_queues[queue_idx] = rxq;
 
        DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
-                 queue_idx, nb_desc, qdev->mtu, socket_id);
+                 queue_idx, nb_desc, rxq->rx_buf_size, socket_id);
 
        return 0;
 err4:
@@ -1541,3 +1534,11 @@ void qede_dev_stop(struct rte_eth_dev *eth_dev)
 
        DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n");
 }
+
+uint16_t
+qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+                    __rte_unused struct rte_mbuf **pkts,
+                    __rte_unused uint16_t nb_pkts)
+{
+       return 0;
+}
index ed9a529..d1f3e99 100644 (file)
        ((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
                << PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))
 
+#define QEDE_MIN_RX_BUFF_SIZE          (1024)
+#define QEDE_VLAN_TAG_SIZE             (4)
+#define QEDE_LLC_SNAP_HDR_LEN          (8)
+
 /* Max supported alignment is 256 (8 shift)
  * minimal alignment shift 6 is optimal for 57xxx HW performance
  */
 #define QEDE_L1_CACHE_SHIFT    6
 #define QEDE_RX_ALIGN_SHIFT    (RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
 #define QEDE_FW_RX_ALIGN_END   (1UL << QEDE_RX_ALIGN_SHIFT)
-
-#define QEDE_ETH_OVERHEAD       (ETHER_HDR_LEN + 8 + 8 + QEDE_FW_RX_ALIGN_END)
+#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
+                                       ~(QEDE_FW_RX_ALIGN_END - 1))
+/* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
+#define QEDE_ETH_OVERHEAD      (((2 * QEDE_VLAN_TAG_SIZE)) - (ETHER_CRC_LEN) \
+                               + (QEDE_LLC_SNAP_HDR_LEN))
 
 /* TBD: Excluding IPV6 */
 #define QEDE_RSS_OFFLOAD_ALL    (ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP | \
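The new helpers make RX buffer sizing explicit: QEDE_ETH_OVERHEAD is 2 * 4 (two VLAN tags) - 4 (ETHER_CRC_LEN) + 8 (optional LLC/SNAP header) = 12 bytes, and QEDE_CEIL_TO_CACHE_LINE_SIZE() rounds the buffer up to the 64-byte firmware alignment. A standalone check of the arithmetic:

    #include <stdio.h>

    #define ALIGN_END 64UL  /* QEDE_FW_RX_ALIGN_END with align shift 6 */
    #define CEIL_TO_CL(n) (((n) + (ALIGN_END - 1)) & ~(ALIGN_END - 1))

    int main(void)
    {
            /* A 1500-byte MTU gives 1500 + 14 (header) + 4 (CRC) = 1518
             * for max_rx_pkt_len, + 12 bytes QEDE_ETH_OVERHEAD = 1530. */
            printf("%lu\n", CEIL_TO_CL(1530UL));  /* prints 1536 */
            return 0;
    }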
@@ -180,6 +187,10 @@ uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
 uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts);
 
+uint16_t qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+                             __rte_unused struct rte_mbuf **pkts,
+                             __rte_unused uint16_t nb_pkts);
+
 /* Fastpath resource alloc/dealloc helpers */
 int qede_alloc_fp_resc(struct qede_dev *qdev);
 
index c1767c4..729d38c 100644 (file)
@@ -80,7 +80,7 @@ static struct rte_eth_link pmd_link = {
                .link_speed = ETH_SPEED_NUM_10G,
                .link_duplex = ETH_LINK_FULL_DUPLEX,
                .link_status = ETH_LINK_DOWN,
-               .link_autoneg = ETH_LINK_SPEED_AUTONEG
+               .link_autoneg = ETH_LINK_AUTONEG
 };
 
 static uint16_t
index f3cd52d..9cf408e 100644 (file)
@@ -1169,7 +1169,7 @@ eth_link_update(struct rte_eth_dev *dev,
        link.link_status = (cgmii_ibuf_is_enabled(ibuf) &&
                        cgmii_ibuf_is_link_up(ibuf)) ? ETH_LINK_UP : ETH_LINK_DOWN;
 
-       link.link_autoneg = ETH_LINK_SPEED_FIXED;
+       link.link_autoneg = ETH_LINK_FIXED;
 
        rte_atomic64_cmpset((uint64_t *)dev_link, *(uint64_t *)dev_link,
                        *(uint64_t *)link_ptr);
@@ -1494,7 +1494,7 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
                        dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len,
                        PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);
-       if (pci_resource_ptr == NULL) {
+       if (pci_resource_ptr == MAP_FAILED) {
                RTE_LOG(ERR, PMD, "Could not mmap file %s (fd = %d)\n",
                                rsc_filename, fd);
                return -EINVAL;
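This is a classic mmap() pitfall: the call reports failure with MAP_FAILED ((void *)-1), never NULL, so the old comparison silently accepted a failed mapping. The correct check in isolation:

    #include <stdio.h>
    #include <stddef.h>
    #include <sys/mman.h>

    static void *map_resource(int fd, size_t len)
    {
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_SHARED, fd, 0);

            if (p == MAP_FAILED) {  /* not: p == NULL */
                    perror("mmap");
                    return NULL;
            }
            return p;
    }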
index 2da5af0..d229bdf 100644 (file)
@@ -98,7 +98,7 @@ nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
        else if (nic->duplex == NICVF_FULL_DUPLEX)
                link->link_duplex = ETH_LINK_FULL_DUPLEX;
        link->link_speed = nic->speed;
-       link->link_autoneg = ETH_LINK_SPEED_AUTONEG;
+       link->link_autoneg = ETH_LINK_AUTONEG;
 }
 
 static void
index 87e9de1..275adb3 100644 (file)
@@ -252,7 +252,7 @@ nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
 
        /* Inform HW to xmit the packets */
        nicvf_addr_write(sq->sq_door, used_desc);
-       return nb_pkts;
+       return i;
 }
 
 static const uint32_t ptype_table[16][16] __rte_cache_aligned = {
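Returning i instead of nb_pkts makes the multiseg TX path honor the burst API contract: the return value is the number of packets actually queued, which can be lower than requested when descriptors run short. Callers depend on that for retry loops, e.g.:

    #include <rte_ethdev.h>

    /* Transmit all n packets, resubmitting whatever a burst left behind. */
    static void
    send_all(uint8_t port, uint16_t queue, struct rte_mbuf **pkts, uint16_t n)
    {
            uint16_t sent = 0;

            while (sent < n)
                    sent += rte_eth_tx_burst(port, queue,
                                             pkts + sent, n - sent);
    }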
index 328dde0..8fde603 100644 (file)
@@ -558,7 +558,7 @@ new_device(int vid)
                rte_atomic32_set(&vq->allow_queuing, 1);
        }
 
-       RTE_LOG(INFO, PMD, "New connection established\n");
+       RTE_LOG(INFO, PMD, "Vhost device %d created\n", vid);
 
        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 
@@ -625,7 +625,7 @@ destroy_device(int vid)
        state->max_vring = 0;
        rte_spinlock_unlock(&state->lock);
 
-       RTE_LOG(INFO, PMD, "Connection closed\n");
+       RTE_LOG(INFO, PMD, "Vhost device %d destroyed\n", vid);
 
        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 }
index 8592485..5a27ab7 100644 (file)
@@ -288,17 +288,6 @@ virtio_dev_queue_release(void *queue __rte_unused)
        /* do nothing */
 }
 
-static int
-virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
-{
-       if (vtpci_queue_idx == hw->max_queue_pairs * 2)
-               return VTNET_CQ;
-       else if (vtpci_queue_idx % 2 == 0)
-               return VTNET_RQ;
-       else
-               return VTNET_TQ;
-}
-
 static uint16_t
 virtio_get_nr_vq(struct virtio_hw *hw)
 {
@@ -847,7 +836,7 @@ static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
                /* Note: limit checked in rte_eth_xstats_names() */
 
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                       struct virtqueue *rxvq = dev->data->rx_queues[i];
+                       struct virtnet_rx *rxvq = dev->data->rx_queues[i];
                        if (rxvq == NULL)
                                continue;
                        for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
@@ -860,7 +849,7 @@ static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
                }
 
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
-                       struct virtqueue *txvq = dev->data->tx_queues[i];
+                       struct virtnet_tx *txvq = dev->data->tx_queues[i];
                        if (txvq == NULL)
                                continue;
                        for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
@@ -1205,6 +1194,11 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
        /* Reset the device although not necessary at startup */
        vtpci_reset(hw);
 
+       if (hw->vqs) {
+               virtio_dev_free_mbufs(eth_dev);
+               virtio_free_queues(hw);
+       }
+
        /* Tell the host we've noticed this device. */
        vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
 
@@ -1565,7 +1559,7 @@ virtio_dev_start(struct rte_eth_dev *dev)
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxvq = dev->data->rx_queues[i];
                /* Flush the old packets */
-               virtqueue_flush(rxvq->vq);
+               virtqueue_rxvq_flush(rxvq->vq);
                virtqueue_notify(rxvq->vq);
        }
 
@@ -1597,12 +1591,15 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct virtnet_rx *rxvq = dev->data->rx_queues[i];
 
+               if (rxvq == NULL || rxvq->vq == NULL)
+                       continue;
+
                PMD_INIT_LOG(DEBUG,
                             "Before freeing rxq[%d] used and unused buf", i);
                VIRTQUEUE_DUMP(rxvq->vq);
 
                PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p", i, rxvq);
-               while ((buf = virtqueue_detatch_unused(rxvq->vq)) != NULL) {
+               while ((buf = virtqueue_detach_unused(rxvq->vq)) != NULL) {
                        rte_pktmbuf_free(buf);
                        mbuf_num++;
                }
@@ -1616,13 +1613,16 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct virtnet_tx *txvq = dev->data->tx_queues[i];
 
+               if (txvq == NULL || txvq->vq == NULL)
+                       continue;
+
                PMD_INIT_LOG(DEBUG,
                             "Before freeing txq[%d] used and unused bufs",
                             i);
                VIRTQUEUE_DUMP(txvq->vq);
 
                mbuf_num = 0;
-               while ((buf = virtqueue_detatch_unused(txvq->vq)) != NULL) {
+               while ((buf = virtqueue_detach_unused(txvq->vq)) != NULL) {
                        rte_pktmbuf_free(buf);
                        mbuf_num++;
                }
index 43fac63..c2fe9eb 100644 (file)
@@ -61,6 +61,7 @@
 #include "virtio_pci.h"
 #include "virtqueue.h"
 #include "virtio_rxtx.h"
 #include "virtio_pci.h"
 #include "virtqueue.h"
 #include "virtio_rxtx.h"
+#include "virtio_rxtx_simple.h"
 
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
@@ -458,6 +459,8 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
                        vq->vq_ring.desc[desc_idx].flags =
                                VRING_DESC_F_WRITE;
                }
+
+               virtio_rxq_vec_setup(rxvq);
        }
 
        memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
@@ -467,30 +470,31 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
                        &rxvq->fake_mbuf;
        }
 
-       while (!virtqueue_full(vq)) {
-               m = rte_mbuf_raw_alloc(rxvq->mpool);
-               if (m == NULL)
-                       break;
+       if (hw->use_simple_rxtx) {
+               while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+                       virtio_rxq_rearm_vec(rxvq);
+                       nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+               }
+       } else {
+               while (!virtqueue_full(vq)) {
+                       m = rte_mbuf_raw_alloc(rxvq->mpool);
+                       if (m == NULL)
+                               break;
 
-               /* Enqueue allocated buffers */
-               if (hw->use_simple_rxtx)
-                       error = virtqueue_enqueue_recv_refill_simple(vq, m);
-               else
+                       /* Enqueue allocated buffers */
                        error = virtqueue_enqueue_recv_refill(vq, m);
-
-               if (error) {
-                       rte_pktmbuf_free(m);
-                       break;
+                       if (error) {
+                               rte_pktmbuf_free(m);
+                               break;
+                       }
+                       nbufs++;
                }
-               nbufs++;
-       }
 
-       vq_update_avail_idx(vq);
+               vq_update_avail_idx(vq);
+       }
 
        PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
 
-       virtio_rxq_vec_setup(rxvq);
-
        VIRTQUEUE_DUMP(vq);
 
        return 0;
@@ -506,7 +510,7 @@ virtio_update_rxtx_handler(struct rte_eth_dev *dev,
 #if defined RTE_ARCH_X86
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE3))
                use_simple_rxtx = 1;
-#elif defined RTE_ARCH_ARM64 || defined CONFIG_RTE_ARCH_ARM
+#elif defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
                use_simple_rxtx = 1;
 #endif
index 28f82d6..e2db823 100644 (file)
@@ -88,7 +88,4 @@ struct virtnet_ctl {
 
 int virtio_rxq_vec_setup(struct virtnet_rx *rxvq);
 
-int virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
-       struct rte_mbuf *m);
-
 #endif /* _VIRTIO_RXTX_H_ */
index a6c0b34..5285c2b 100644 (file)
 #pragma GCC diagnostic ignored "-Wcast-qual"
 #endif
 
-int __attribute__((cold))
-virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
-       struct rte_mbuf *cookie)
-{
-       struct vq_desc_extra *dxp;
-       struct vring_desc *start_dp;
-       uint16_t desc_idx;
-
-       cookie->port = vq->rxq.port_id;
-
-       desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
-       dxp = &vq->vq_descx[desc_idx];
-       dxp->cookie = (void *)cookie;
-       vq->sw_ring[desc_idx] = cookie;
-
-       start_dp = vq->vq_ring.desc;
-       start_dp[desc_idx].addr =
-               VIRTIO_MBUF_ADDR(cookie, vq) +
-               RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
-       start_dp[desc_idx].len = cookie->buf_len -
-               RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
-
-       vq->vq_free_cnt--;
-       vq->vq_avail_idx++;
-
-       return 0;
-}
-
 uint16_t
 virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts)
@@ -102,7 +74,7 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
        rte_compiler_barrier();
 
        if (nb_used >= VIRTIO_TX_FREE_THRESH)
-               virtio_xmit_cleanup(vq);
+               virtio_xmit_cleanup_simple(vq);
 
        nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);
        desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);
index b08f859..bca677c 100644 (file)
@@ -89,7 +89,7 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
 #define VIRTIO_TX_FREE_NR 32
 /* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */
 static inline void
-virtio_xmit_cleanup(struct virtqueue *vq)
+virtio_xmit_cleanup_simple(struct virtqueue *vq)
 {
        uint16_t i, desc_idx;
        uint32_t nb_free = 0;
index 91f6a59..8bb155d 100644 (file)
@@ -142,6 +142,9 @@ virtio_user_start_device(struct virtio_user_dev *dev)
        uint64_t features;
        int ret;
 
+       /* Do not check return as already done in init, or reset in stop */
+       vhost_user_sock(dev->vhostfd, VHOST_USER_SET_OWNER, NULL);
+
        /* Step 0: tell vhost to create queues */
        if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
                goto error;
@@ -240,6 +243,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
                PMD_INIT_LOG(ERR, "backend set up fails");
                return -1;
        }
                PMD_INIT_LOG(ERR, "backend set up fails");
                return -1;
        }
+
        if (vhost_user_sock(dev->vhostfd, VHOST_USER_SET_OWNER, NULL) < 0) {
                PMD_INIT_LOG(ERR, "set_owner fails: %s", strerror(errno));
                return -1;
index 4f8707a..0e9e03c 100644 (file)
@@ -37,6 +37,7 @@
 #include "virtqueue.h"
 #include "virtio_logs.h"
 #include "virtio_pci.h"
 #include "virtqueue.h"
 #include "virtio_logs.h"
 #include "virtio_pci.h"
+#include "virtio_rxtx_simple.h"
 
 void
 virtqueue_disable_intr(struct virtqueue *vq)
@@ -55,26 +56,50 @@ virtqueue_disable_intr(struct virtqueue *vq)
  * 2) mbuf that hasn't been consumed by backend.
  */
 struct rte_mbuf *
-virtqueue_detatch_unused(struct virtqueue *vq)
+virtqueue_detach_unused(struct virtqueue *vq)
 {
        struct rte_mbuf *cookie;
-       int idx;
+       struct virtio_hw *hw;
+       uint16_t start, end;
+       int type, idx;
 
-       if (vq != NULL)
-               for (idx = 0; idx < vq->vq_nentries; idx++) {
+       if (vq == NULL)
+               return NULL;
+
+       hw = vq->hw;
+       type = virtio_get_queue_type(hw, vq->vq_queue_index);
+       start = vq->vq_avail_idx & (vq->vq_nentries - 1);
+       end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);
+
+       for (idx = 0; idx < vq->vq_nentries; idx++) {
+               if (hw->use_simple_rxtx && type == VTNET_RQ) {
+                       if (start <= end && idx >= start && idx < end)
+                               continue;
+                       if (start > end && (idx >= start || idx < end))
+                               continue;
+                       cookie = vq->sw_ring[idx];
+                       if (cookie != NULL) {
+                               vq->sw_ring[idx] = NULL;
+                               return cookie;
+                       }
+               } else {
                        cookie = vq->vq_descx[idx].cookie;
                        if (cookie != NULL) {
                                vq->vq_descx[idx].cookie = NULL;
                                return cookie;
                        }
                }
+       }
+
        return NULL;
 }
 
 /* Flush the elements in the used ring. */
 void
-virtqueue_flush(struct virtqueue *vq)
+virtqueue_rxvq_flush(struct virtqueue *vq)
 {
+       struct virtnet_rx *rxq = &vq->rxq;
+       struct virtio_hw *hw = vq->hw;
        struct vring_used_elem *uep;
        struct vq_desc_extra *dxp;
        uint16_t used_idx, desc_idx;
@@ -85,13 +110,27 @@ virtqueue_flush(struct virtqueue *vq)
        for (i = 0; i < nb_used; i++) {
                used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
                uep = &vq->vq_ring.used->ring[used_idx];
-               desc_idx = (uint16_t)uep->id;
-               dxp = &vq->vq_descx[desc_idx];
-               if (dxp->cookie != NULL) {
-                       rte_pktmbuf_free(dxp->cookie);
-                       dxp->cookie = NULL;
+               if (hw->use_simple_rxtx) {
+                       desc_idx = used_idx;
+                       rte_pktmbuf_free(vq->sw_ring[desc_idx]);
+                       vq->vq_free_cnt++;
+               } else {
+                       desc_idx = (uint16_t)uep->id;
+                       dxp = &vq->vq_descx[desc_idx];
+                       if (dxp->cookie != NULL) {
+                               rte_pktmbuf_free(dxp->cookie);
+                               dxp->cookie = NULL;
+                       }
+                       vq_ring_free_chain(vq, desc_idx);
                }
                vq->vq_used_cons_idx++;
-               vq_ring_free_chain(vq, desc_idx);
+       }
+
+       if (hw->use_simple_rxtx) {
+               while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+                       virtio_rxq_rearm_vec(rxq);
+                       if (virtqueue_kick_prepare(vq))
+                               virtqueue_notify(vq);
+               }
        }
 }
index ec967a5..3748f60 100644 (file)
@@ -288,10 +288,10 @@ void virtqueue_dump(struct virtqueue *vq);
 /**
  *  Get all mbufs to be freed.
  */
-struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);
+struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
 
 /* Flush the elements in the used ring. */
-void virtqueue_flush(struct virtqueue *vq);
+void virtqueue_rxvq_flush(struct virtqueue *vq);
 
 static inline int
 virtqueue_full(const struct virtqueue *vq)
@@ -299,6 +299,17 @@ virtqueue_full(const struct virtqueue *vq)
        return vq->vq_free_cnt == 0;
 }
 
+static inline int
+virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
+{
+       if (vtpci_queue_idx == hw->max_queue_pairs * 2)
+               return VTNET_CQ;
+       else if (vtpci_queue_idx % 2 == 0)
+               return VTNET_RQ;
+       else
+               return VTNET_TQ;
+}
+
 #define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
 
 void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
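The virtio_get_queue_type() helper added above encodes the virtio queue-numbering convention: receive and transmit virtqueues alternate, so even indices are RX queues, odd indices are TX queues, and the control queue, when present, sits in the last slot at index 2 * max_queue_pairs. A standalone sketch of the same mapping, with the VTNET_* values assumed here for illustration rather than taken from the DPDK headers:

#include <stdio.h>
#include <stdint.h>

/* illustrative stand-ins for the VTNET_* queue-type constants */
enum vq_type { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };

static enum vq_type queue_type(uint16_t max_queue_pairs, uint16_t idx)
{
	if (idx == max_queue_pairs * 2)
		return VTNET_CQ;	/* control queue occupies the last slot */
	return (idx % 2 == 0) ? VTNET_RQ : VTNET_TQ;
}

int main(void)
{
	uint16_t idx;

	/* with 2 queue pairs: 0=RX0 1=TX0 2=RX1 3=TX1 4=CQ */
	for (idx = 0; idx <= 4; idx++)
		printf("vq %u -> type %d\n", idx, queue_type(2, idx));
	return 0;
}

virtqueue_detach_unused() relies on this helper to decide whether a queue's unused mbufs live in sw_ring (the simple RX path) or in the vq_descx cookies (everything else).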
index 2bd2f27..9a889c6 100644 (file)
@@ -790,7 +790,7 @@ vmxnet3_dev_link_update(struct rte_eth_dev *dev,
                link.link_status = ETH_LINK_UP;
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
                link.link_speed = ETH_SPEED_NUM_10G;
-               link.link_autoneg = ETH_LINK_SPEED_FIXED;
+               link.link_autoneg = ETH_LINK_AUTONEG;
        }
 
        vmxnet3_dev_atomic_write_link_status(dev, &link);
index 350eae3..0a6dcd8 100644 (file)
@@ -121,7 +121,7 @@ void virtqueue_dump(struct virtqueue *vq);
 /**
  *  Get all mbufs to be freed.
  */
-struct rte_mbuf * virtqueue_detatch_unused(struct virtqueue *vq);
+struct rte_mbuf * virtqueue_detach_unused(struct virtqueue *vq);
 
 static inline int __attribute__((always_inline))
 virtqueue_full(const struct virtqueue *vq)
index 9a4ec80..f1303e3 100644 (file)
@@ -437,6 +437,11 @@ static void cmd_obj_send_parsed(void *parsed_result,
                                (BOND_IP_3 << 16) | (BOND_IP_4 << 24);
 
        created_pkt = rte_pktmbuf_alloc(mbuf_pool);
+       if (created_pkt == NULL) {
+               cmdline_printf(cl, "Failed to allocate mbuf\n");
+               return;
+       }
+
        pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr);
        created_pkt->data_len = pkt_size;
        created_pkt->pkt_len = pkt_size;
index 89bf1cc..331d2f4 100644 (file)
@@ -141,7 +141,7 @@ struct stats {
        uint64_t rx;
        uint64_t tx;
        uint64_t dropped;
-};
+} __rte_cache_aligned;
 
 /* Array of lcore-specific stats */
 static struct stats lcore_stats[RTE_MAX_LCORE];
index d46bd36..0e1438c 100644 (file)
@@ -1701,7 +1701,7 @@ app_init_pipelines(struct app_params *app)
                data->ptype = ptype;
 
                data->timer_period = (rte_get_tsc_hz() *
-                       params->timer_period) / 100;
+                       params->timer_period) / 1000;
        }
 }
 
index 9cccd8a..3c1ea16 100644 (file)
@@ -409,7 +409,8 @@ inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
                }
                /* Only check SPI match for processed IPSec packets */
                sa_idx = ip->res[i] & PROTECT_MASK;
-               if (sa_idx == 0 || !inbound_sa_check(sa, m, sa_idx)) {
+               if (sa_idx >= IPSEC_SA_MAX_ENTRIES ||
+                               !inbound_sa_check(sa, m, sa_idx)) {
                        rte_pktmbuf_free(m);
                        continue;
                }
@@ -474,9 +475,9 @@ outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
        for (i = 0; i < ip->num; i++) {
                m = ip->pkts[i];
                sa_idx = ip->res[i] & PROTECT_MASK;
-               if ((ip->res[i] == 0) || (ip->res[i] & DISCARD))
+               if (ip->res[i] & DISCARD)
                        rte_pktmbuf_free(m);
-               else if (sa_idx != 0) {
+               else if (sa_idx < IPSEC_SA_MAX_ENTRIES) {
                        ipsec->res[ipsec->num] = sa_idx;
                        ipsec->pkts[ipsec->num++] = m;
                } else /* BYPASS */
index 8c4406c..513959c 100644 (file)
@@ -232,6 +232,8 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
        APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
        if (status->status < 0)
                return;
+       if (atoi(tokens[1]) == INVALID_SPI)
+               return;
        rule->spi = atoi(tokens[1]);
 
        for (ti = 2; ti < n_tokens; ti++) {
index eb5adb6..217a448 100644 (file)
@@ -83,8 +83,6 @@
 
 #define MIN_ZERO_POLL_COUNT 10
 
-/* around 100ms at 2 Ghz */
-#define TIMER_RESOLUTION_CYCLES           200000000ULL
 /* 100 ms interval */
 #define TIMER_NUMBER_PER_SECOND           10
 /* 100000 us */
@@ -824,7 +822,7 @@ main_loop(__attribute__((unused)) void *dummy)
 {
        struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
        unsigned lcore_id;
-       uint64_t prev_tsc, diff_tsc, cur_tsc;
+       uint64_t prev_tsc, diff_tsc, cur_tsc, tim_res_tsc, hz;
        uint64_t prev_tsc_power = 0, cur_tsc_power, diff_tsc_power;
        int i, j, nb_rx;
        uint8_t portid, queueid;
@@ -838,6 +836,8 @@ main_loop(__attribute__((unused)) void *dummy)
        const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
 
        prev_tsc = 0;
+       hz = rte_get_timer_hz();
+       tim_res_tsc = hz/TIMER_NUMBER_PER_SECOND;
 
        lcore_id = rte_lcore_id();
        qconf = &lcore_conf[lcore_id];
@@ -883,7 +883,7 @@ main_loop(__attribute__((unused)) void *dummy)
                }
 
                diff_tsc_power = cur_tsc_power - prev_tsc_power;
-               if (diff_tsc_power > TIMER_RESOLUTION_CYCLES) {
+               if (diff_tsc_power > tim_res_tsc) {
                        rte_timer_manage();
                        prev_tsc_power = cur_tsc_power;
                }
@@ -999,9 +999,11 @@ start_rx:
                                        turn_on_intr(qconf);
                                        sleep_until_rx_interrupt(
                                                qconf->n_rx_queue);
+                                       /**
+                                        * start receiving packets immediately
+                                        */
+                                       goto start_rx;
                                }
-                               /* start receiving packets immediately */
-                               goto start_rx;
                        }
                        stats[lcore_id].sleep_time += lcore_idle_hint;
                }
index 1e9059b..fe23274 100644 (file)
@@ -277,12 +277,6 @@ port_init(uint8_t port)
        /* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
        rte_eth_dev_info_get (port, &dev_info);
 
-       if (dev_info.max_rx_queues > MAX_QUEUES) {
-               rte_exit(EXIT_FAILURE,
-                       "please define MAX_QUEUES no less than %u in %s\n",
-                       dev_info.max_rx_queues, __FILE__);
-       }
-
        rxconf = &dev_info.default_rxconf;
        txconf = &dev_info.default_txconf;
        rxconf->rx_drop_en = 1;
@@ -954,7 +948,8 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
                struct vhost_dev *vdev2;
 
                TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
-                       virtio_xmit(vdev2, vdev, m);
+                       if (vdev2 != vdev)
+                               virtio_xmit(vdev2, vdev, m);
                }
                goto queue2nic;
        }
index e8fb908..9676d8b 100644 (file)
@@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/rwlock.h>
 #include <sys/systm.h>
 #include <sys/sysctl.h>
+#include <sys/vmmeter.h>
 
 #include <machine/bus.h>
 
index 3614da8..248312d 100644 (file)
@@ -150,7 +150,7 @@ rte_eal_hugepage_attach(void)
        /* Map the shared hugepage_info into the process address spaces */
        hpi = mmap(NULL, sizeof(struct hugepage_info), PROT_READ, MAP_PRIVATE,
                        fd_hugepage_info, 0);
-       if (hpi == NULL) {
+       if (hpi == MAP_FAILED) {
                RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
                goto error;
        }
                RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
                goto error;
        }
index 64f4e0a..b58d85b 100644 (file)
@@ -236,7 +236,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
                return NULL;
        }
 
-       const struct malloc_elem *elem = malloc_elem_from_data(mz_addr);
+       struct malloc_elem *elem = malloc_elem_from_data(mz_addr);
 
        /* fill the zone in config */
        mz = get_next_free_memzone();
@@ -244,6 +244,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
        if (mz == NULL) {
                RTE_LOG(ERR, EAL, "%s(): Cannot find free memzone but there is room "
                                "in config!\n", __func__);
+               malloc_elem_free(elem);
                rte_errno = ENOSPC;
                return NULL;
        }
index 367a681..6f91ff9 100644 (file)
@@ -117,7 +117,6 @@ pci_uio_map_resource(struct rte_pci_device *dev)
 
        dev->intr_handle.fd = -1;
        dev->intr_handle.uio_cfg_fd = -1;
-       dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
 
        /* secondary processes - use already recorded details */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
index fb4fccb..37f5eff 100644 (file)
@@ -64,9 +64,9 @@ extern "C" {
  * occur before the STORE operations generated after.
  */
 #ifdef RTE_ARCH_64
-#define        rte_wmb() {asm volatile("lwsync" : : : "memory"); }
+#define        rte_wmb() asm volatile("lwsync" : : : "memory")
 #else
-#define        rte_wmb() {asm volatile("sync" : : : "memory"); }
+#define        rte_wmb() asm volatile("sync" : : : "memory")
 #endif
 
 /**
@@ -76,9 +76,9 @@ extern "C" {
  * occur before the LOAD operations generated after.
  */
 #ifdef RTE_ARCH_64
-#define        rte_rmb() {asm volatile("lwsync" : : : "memory"); }
+#define        rte_rmb() asm volatile("lwsync" : : : "memory")
 #else
-#define        rte_rmb() {asm volatile("sync" : : : "memory"); }
+#define        rte_rmb() asm volatile("sync" : : : "memory")
 #endif
 
 #define rte_smp_mb() rte_mb()
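Dropping the braces from rte_wmb()/rte_rmb() is not cosmetic: a macro that expands to a brace-wrapped block picks up a stray semicolon at the call site, and that semicolon terminates an if statement before its else branch. A compilable sketch of the failure mode, using a portable compiler barrier in place of the PowerPC lwsync:

/* "{asm ...; }" becomes a block plus a stray ';' once the caller
 * appends one; the barrier body here is a plain compiler barrier,
 * standing in for lwsync so the sketch builds on any architecture */
#define BAD_WMB()  {asm volatile("" : : : "memory"); }
#define GOOD_WMB() asm volatile("" : : : "memory")

int f(int ordered)
{
	if (ordered)
		GOOD_WMB();	/* expands to a single statement: fine */
	else
		return 0;

	/* the same construct with BAD_WMB() expands to "{...};", and the
	 * extra ';' closes the if, so the compiler rejects the else:
	 *
	 *   if (ordered)
	 *           BAD_WMB();
	 *   else            <- error: 'else' without a previous 'if'
	 */
	return 1;
}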
index 00b1cdf..d12b679 100644 (file)
@@ -55,12 +55,52 @@ extern "C" {
 
 #define        rte_rmb() _mm_lfence()
 
-#define rte_smp_mb() rte_mb()
-
 #define rte_smp_wmb() rte_compiler_barrier()
 
 #define rte_smp_rmb() rte_compiler_barrier()
 
+/*
+ * From Intel Software Development Manual; Vol 3;
+ * 8.2.2 Memory Ordering in P6 and More Recent Processor Families:
+ * ...
+ * . Reads are not reordered with other reads.
+ * . Writes are not reordered with older reads.
+ * . Writes to memory are not reordered with other writes,
+ *   with the following exceptions:
+ *   . streaming stores (writes) executed with the non-temporal move
+ *     instructions (MOVNTI, MOVNTQ, MOVNTDQ, MOVNTPS, and MOVNTPD); and
+ *   . string operations (see Section 8.2.4.1).
+ *  ...
+ * . Reads may be reordered with older writes to different locations but not
+ * with older writes to the same location.
+ * . Reads or writes cannot be reordered with I/O instructions,
+ * locked instructions, or serializing instructions.
+ * . Reads cannot pass earlier LFENCE and MFENCE instructions.
+ * . Writes ... cannot pass earlier LFENCE, SFENCE, and MFENCE instructions.
+ * . LFENCE instructions cannot pass earlier reads.
+ * . SFENCE instructions cannot pass earlier writes ...
+ * . MFENCE instructions cannot pass earlier reads, writes ...
+ *
+ * As pointed out by the JVM folks, that makes it possible to use
+ * lock-prefixed instructions to get the same effect as mfence, and on
+ * most modern HW that gives better performance than using mfence:
+ * https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
+ * The basic idea is to use a lock-prefixed add with some dummy memory
+ * location as the destination. From their experiments, 128B (2 cache
+ * lines) below the current stack pointer looks like a good candidate.
+ * So below we use that technique for the rte_smp_mb() implementation.
+ */
+
+static inline void __attribute__((always_inline))
+rte_smp_mb(void)
+{
+#ifdef RTE_ARCH_I686
+       asm volatile("lock addl $0, -128(%%esp); " ::: "memory");
+#else
+       asm volatile("lock addl $0, -128(%%rsp); " ::: "memory");
+#endif
+}
+
 /*------------------------- 16 bit atomic operations -------------------------*/
 
 #ifndef RTE_FORCE_INTRINSICS
 /*------------------------- 16 bit atomic operations -------------------------*/
 
 #ifndef RTE_FORCE_INTRINSICS
index cab6fb4..ec1dce0 100644 (file)
@@ -86,7 +86,7 @@ void rte_dump_registers(void);
 #endif
 #define        RTE_VERIFY(exp) do {                                                  \
        if (unlikely(!(exp)))                                                           \
 #endif
 #define        RTE_VERIFY(exp) do {                                                  \
        if (unlikely(!(exp)))                                                           \
-               rte_panic("line %d\tassert \"" #exp "\" failed\n", __LINE__); \
+               rte_panic("line %d\tassert \"%s\" failed\n", __LINE__, #exp); \
 } while (0)
 
 /*
 } while (0)
 
 /*
index e92737d..4a9f482 100644 (file)
@@ -66,7 +66,7 @@ extern "C" {
 /**
  * Patch level number i.e. the z in yy.mm.z
  */
-#define RTE_VER_MINOR 4
+#define RTE_VER_MINOR 5
 
 /**
  * Extra string to be appended to version number
index 77a8615..e2bd3ac 100644 (file)
@@ -98,6 +98,7 @@ elem_start_pt(struct malloc_elem *elem, size_t size, unsigned align,
        if ((new_data_start & bmask) != ((end_pt - 1) & bmask)) {
                end_pt = RTE_ALIGN_FLOOR(end_pt, bound);
                new_data_start = RTE_ALIGN_FLOOR((end_pt - size), align);
+               end_pt = new_data_start + size;
                if (((end_pt - 1) & bmask) != (new_data_start & bmask))
                        return NULL;
        }
index 267a4c6..c731f1c 100644 (file)
@@ -178,12 +178,14 @@ malloc_heap_alloc(struct malloc_heap *heap,
  * Function to retrieve data for heap on given socket
  */
 int
-malloc_heap_get_stats(const struct malloc_heap *heap,
+malloc_heap_get_stats(struct malloc_heap *heap,
                struct rte_malloc_socket_stats *socket_stats)
 {
        size_t idx;
        struct malloc_elem *elem;
 
+       rte_spinlock_lock(&heap->lock);
+
        /* Initialise variables for heap */
        socket_stats->free_count = 0;
        socket_stats->heap_freesz_bytes = 0;
@@ -205,6 +207,8 @@ malloc_heap_get_stats(const struct malloc_heap *heap,
        socket_stats->heap_allocsz_bytes = (socket_stats->heap_totalsz_bytes -
                        socket_stats->heap_freesz_bytes);
        socket_stats->alloc_count = heap->alloc_count;
+
+       rte_spinlock_unlock(&heap->lock);
        return 0;
 }
 
index 3ccbef0..3b1166f 100644 (file)
@@ -57,7 +57,7 @@ malloc_heap_alloc(struct malloc_heap *heap,   const char *type, size_t size,
                unsigned flags, size_t align, size_t bound);
 
 int
-malloc_heap_get_stats(const struct malloc_heap *heap,
+malloc_heap_get_stats(struct malloc_heap *heap,
                struct rte_malloc_socket_stats *socket_stats);
 
 int
index 9765d1b..4625fab 100644 (file)
 
 struct rte_keepalive {
        /** Core Liveness. */
-       enum rte_keepalive_state __rte_cache_aligned state_flags[
-               RTE_KEEPALIVE_MAXCORES];
+       struct {
+               /*
+                * Each element must be cache aligned to prevent false sharing.
+                */
+               enum rte_keepalive_state core_state __rte_cache_aligned;
+       } live_data[RTE_KEEPALIVE_MAXCORES];
 
        /** Last-seen-alive timestamps */
        uint64_t last_alive[RTE_KEEPALIVE_MAXCORES];
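The wrapper struct exists purely for layout: aligning every element to a cache line means the core writing its own state never dirties the line the monitor core (or a neighbour) is polling, which the old packed array of enums could not guarantee. A minimal sketch of the pattern, with the line size and names assumed for illustration:

#include <stdint.h>

#define CACHE_LINE_SIZE 64	/* assumed; matches common x86 parts */
#define MAX_CORES       128

/* one cache line per element: a store by core A no longer invalidates
 * the line that holds core B's state, i.e. no false sharing */
struct per_core_state {
	uint32_t state;
} __attribute__((aligned(CACHE_LINE_SIZE)));

static struct per_core_state live_data[MAX_CORES];

void mark_alive(unsigned int core_id)
{
	live_data[core_id].state = 1;	/* written only by core_id */
}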
@@ -96,19 +100,22 @@ rte_keepalive_dispatch_pings(__rte_unused void *ptr_timer,
                if (keepcfg->active_cores[idx_core] == 0)
                        continue;
 
-               switch (keepcfg->state_flags[idx_core]) {
+               switch (keepcfg->live_data[idx_core].core_state) {
                case RTE_KA_STATE_UNUSED:
                        break;
                case RTE_KA_STATE_ALIVE: /* Alive */
-                       keepcfg->state_flags[idx_core] = RTE_KA_STATE_MISSING;
+                       keepcfg->live_data[idx_core].core_state =
+                           RTE_KA_STATE_MISSING;
                        keepcfg->last_alive[idx_core] = rte_rdtsc();
                        break;
                case RTE_KA_STATE_MISSING: /* MIA */
                        print_trace("Core MIA. ", keepcfg, idx_core);
-                       keepcfg->state_flags[idx_core] = RTE_KA_STATE_DEAD;
+                       keepcfg->live_data[idx_core].core_state =
+                           RTE_KA_STATE_DEAD;
                        break;
                case RTE_KA_STATE_DEAD: /* Dead */
-                       keepcfg->state_flags[idx_core] = RTE_KA_STATE_GONE;
+                       keepcfg->live_data[idx_core].core_state =
+                           RTE_KA_STATE_GONE;
                        print_trace("Core died. ", keepcfg, idx_core);
                        if (keepcfg->callback)
                                keepcfg->callback(
                        print_trace("Core died. ", keepcfg, idx_core);
                        if (keepcfg->callback)
                                keepcfg->callback(
@@ -119,7 +126,8 @@ rte_keepalive_dispatch_pings(__rte_unused void *ptr_timer,
                case RTE_KA_STATE_GONE: /* Buried */
                        break;
                case RTE_KA_STATE_DOZING: /* Core going idle */
-                       keepcfg->state_flags[idx_core] = RTE_KA_STATE_SLEEP;
+                       keepcfg->live_data[idx_core].core_state =
+                           RTE_KA_STATE_SLEEP;
                        keepcfg->last_alive[idx_core] = rte_rdtsc();
                        break;
                case RTE_KA_STATE_SLEEP: /* Idled core */
@@ -129,7 +137,7 @@ rte_keepalive_dispatch_pings(__rte_unused void *ptr_timer,
                        keepcfg->relay_callback(
                                keepcfg->relay_callback_data,
                                idx_core,
-                               keepcfg->state_flags[idx_core],
+                               keepcfg->live_data[idx_core].core_state,
                                keepcfg->last_alive[idx_core]
                                );
        }
@@ -173,11 +181,11 @@ rte_keepalive_register_core(struct rte_keepalive *keepcfg, const int id_core)
 void
 rte_keepalive_mark_alive(struct rte_keepalive *keepcfg)
 {
-       keepcfg->state_flags[rte_lcore_id()] = RTE_KA_STATE_ALIVE;
+       keepcfg->live_data[rte_lcore_id()].core_state = RTE_KA_STATE_ALIVE;
 }
 
 void
 rte_keepalive_mark_sleep(struct rte_keepalive *keepcfg)
 {
-       keepcfg->state_flags[rte_lcore_id()] = RTE_KA_STATE_DOZING;
+       keepcfg->live_data[rte_lcore_id()].core_state = RTE_KA_STATE_DOZING;
 }
index 876ba38..b0d0c3c 100644 (file)
@@ -623,7 +623,6 @@ pci_ioport_map(struct rte_pci_device *dev, int bar __rte_unused,
        if (!found)
                return -1;
 
-       dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
        p->base = start;
        RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%x\n", start);
 
index 702f7a2..dd45107 100644 (file)
 static struct vfio_config vfio_cfg;
 
 static int vfio_type1_dma_map(int);
+static int vfio_spapr_dma_map(int);
 static int vfio_noiommu_dma_map(int);
 
 /* IOMMU types we support */
 static const struct vfio_iommu_type iommu_types[] = {
        /* x86 IOMMU, otherwise known as type 1 */
        { RTE_VFIO_TYPE1, "Type 1", &vfio_type1_dma_map},
+       /* ppc64 IOMMU, otherwise known as spapr */
+       { RTE_VFIO_SPAPR, "sPAPR", &vfio_spapr_dma_map},
        /* IOMMU-less mode */
        { RTE_VFIO_NOIOMMU, "No-IOMMU", &vfio_noiommu_dma_map},
 };
@@ -339,7 +342,7 @@ vfio_enable(const char *modname)
 int
 vfio_is_enabled(const char *modname)
 {
-       const int mod_available = rte_eal_check_module(modname);
+       const int mod_available = rte_eal_check_module(modname) > 0;
        return vfio_cfg.vfio_enabled && mod_available;
 }
 
@@ -539,6 +542,93 @@ vfio_type1_dma_map(int vfio_container_fd)
        return 0;
 }
 
+static int
+vfio_spapr_dma_map(int vfio_container_fd)
+{
+       const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+       int i, ret;
+
+       struct vfio_iommu_spapr_register_memory reg = {
+               .argsz = sizeof(reg),
+               .flags = 0
+       };
+       struct vfio_iommu_spapr_tce_info info = {
+               .argsz = sizeof(info),
+       };
+       struct vfio_iommu_spapr_tce_create create = {
+               .argsz = sizeof(create),
+       };
+       struct vfio_iommu_spapr_tce_remove remove = {
+               .argsz = sizeof(remove),
+       };
+
+       /* query spapr iommu info */
+       ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
+       if (ret) {
+               RTE_LOG(ERR, EAL, "  cannot get iommu info, "
+                               "error %i (%s)\n", errno, strerror(errno));
+               return -1;
+       }
+
+       /* remove default DMA of 32 bit window */
+       remove.start_addr = info.dma32_window_start;
+       ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
+       if (ret) {
+               RTE_LOG(ERR, EAL, "  cannot remove default DMA window, "
+                               "error %i (%s)\n", errno, strerror(errno));
+               return -1;
+       }
+
+       /* calculate window size based on number of hugepages configured */
+       create.window_size = rte_eal_get_physmem_size();
+       create.page_shift = __builtin_ctzll(ms->hugepage_sz);
+       create.levels = 2;
+
+       ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
+       if (ret) {
+               RTE_LOG(ERR, EAL, "  cannot create new DMA window, "
+                               "error %i (%s)\n", errno, strerror(errno));
+               return -1;
+       }
+
+       /* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
+       for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+               struct vfio_iommu_type1_dma_map dma_map;
+
+               if (ms[i].addr == NULL)
+                       break;
+
+               reg.vaddr = (uintptr_t) ms[i].addr;
+               reg.size = ms[i].len;
+               ret = ioctl(vfio_container_fd,
+                       VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
+               if (ret) {
+                       RTE_LOG(ERR, EAL, "  cannot register vaddr for IOMMU, "
+                               "error %i (%s)\n", errno, strerror(errno));
+                       return -1;
+               }
+
+               memset(&dma_map, 0, sizeof(dma_map));
+               dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
+               dma_map.vaddr = ms[i].addr_64;
+               dma_map.size = ms[i].len;
+               dma_map.iova = ms[i].phys_addr;
+               dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
+                                VFIO_DMA_MAP_FLAG_WRITE;
+
+               ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
+
+               if (ret) {
+                       RTE_LOG(ERR, EAL, "  cannot set up DMA remapping, "
+                               "error %i (%s)\n", errno, strerror(errno));
+                       return -1;
+               }
+
+       }
+
+       return 0;
+}
+
 static int
 vfio_noiommu_dma_map(int __rte_unused vfio_container_fd)
 {
index 29f7f3e..ac31a4f 100644 (file)
 
 #define RTE_VFIO_TYPE1 VFIO_TYPE1_IOMMU
 
+#ifndef VFIO_SPAPR_TCE_v2_IOMMU
+#define RTE_VFIO_SPAPR 7
+#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 17)
+#define VFIO_IOMMU_SPAPR_TCE_CREATE _IO(VFIO_TYPE, VFIO_BASE + 19)
+#define VFIO_IOMMU_SPAPR_TCE_REMOVE _IO(VFIO_TYPE, VFIO_BASE + 20)
+struct vfio_iommu_spapr_register_memory {
+       uint32_t argsz;
+       uint32_t flags;
+       uint64_t vaddr;
+       uint64_t size;
+};
+struct vfio_iommu_spapr_tce_create {
+       uint32_t argsz;
+       uint32_t page_shift;
+       uint64_t window_size;
+       uint32_t levels;
+};
+struct vfio_iommu_spapr_tce_remove {
+       uint32_t argsz;
+       uint64_t start_addr;
+};
+#else
+#define RTE_VFIO_SPAPR VFIO_SPAPR_TCE_v2_IOMMU
+#endif
+
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
 #define RTE_VFIO_NOIOMMU 8
 #else
index 0d781e4..3825933 100644 (file)
@@ -123,3 +123,7 @@ static bool pci_check_and_mask_intx(struct pci_dev *pdev)
 }
 
 #endif /* < 3.3.0 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+#define HAVE_ALLOC_IRQ_VECTORS 1
+#endif
index df41e45..9f00f07 100644 (file)
@@ -325,7 +325,9 @@ static int
 igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
        struct rte_uio_pci_dev *udev;
+#ifndef HAVE_ALLOC_IRQ_VECTORS
        struct msix_entry msix_entry;
+#endif
        int err;
 
        udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
@@ -379,6 +381,7 @@ igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
        switch (igbuio_intr_mode_preferred) {
        case RTE_INTR_MODE_MSIX:
                /* Only 1 msi-x vector needed */
+#ifndef HAVE_ALLOC_IRQ_VECTORS
                msix_entry.entry = 0;
                if (pci_enable_msix(dev, &msix_entry, 1) == 0) {
                        dev_dbg(&dev->dev, "using MSI-X");
@@ -386,6 +389,15 @@ igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
                        udev->mode = RTE_INTR_MODE_MSIX;
                        break;
                }
+#else
+               if (pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSIX) == 1) {
+                       dev_dbg(&dev->dev, "using MSI-X");
+                       udev->info.irq_flags = IRQF_NO_THREAD;
+                       udev->info.irq = pci_irq_vector(dev, 0);
+                       udev->mode = RTE_INTR_MODE_MSIX;
+                       break;
+               }
+#endif
                /* fall back to INTX */
        case RTE_INTR_MODE_LEGACY:
                if (pci_intx_mask_supported(dev)) {
@@ -429,8 +441,13 @@ fail_remove_group:
        sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
 fail_release_iomem:
        igbuio_pci_release_iomem(&udev->info);
+#ifndef HAVE_ALLOC_IRQ_VECTORS
        if (udev->mode == RTE_INTR_MODE_MSIX)
                pci_disable_msix(udev->pdev);
+#else
+       if (udev->mode == RTE_INTR_MODE_MSIX)
+               pci_free_irq_vectors(udev->pdev);
+#endif
        pci_disable_device(dev);
 fail_free:
        kfree(udev);
@@ -446,8 +463,13 @@ igbuio_pci_remove(struct pci_dev *dev)
        sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
        uio_unregister_device(&udev->info);
        igbuio_pci_release_iomem(&udev->info);
+#ifndef HAVE_ALLOC_IRQ_VECTORS
        if (udev->mode == RTE_INTR_MODE_MSIX)
                pci_disable_msix(dev);
+#else
+       if (udev->mode == RTE_INTR_MODE_MSIX)
+               pci_free_irq_vectors(dev);
+#endif
        pci_disable_device(dev);
        pci_set_drvdata(dev, NULL);
        kfree(udev);
index acb1a69..3c683e1 100644 (file)
@@ -137,11 +137,20 @@ static void igb_clean_all_tx_rings(struct igb_adapter *);
 static void igb_clean_all_rx_rings(struct igb_adapter *);
 static void igb_clean_tx_ring(struct igb_ring *);
 static void igb_set_rx_mode(struct net_device *);
+#ifdef HAVE_TIMER_SETUP
+static void igb_update_phy_info(struct timer_list *);
+static void igb_watchdog(struct timer_list *);
+#else
 static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
+#endif
 static void igb_watchdog_task(struct work_struct *);
 static void igb_dma_err_task(struct work_struct *);
 static void igb_watchdog_task(struct work_struct *);
 static void igb_dma_err_task(struct work_struct *);
+#ifdef HAVE_TIMER_SETUP
+static void igb_dma_err_timer(struct timer_list *);
+#else
 static void igb_dma_err_timer(unsigned long data);
 static void igb_dma_err_timer(unsigned long data);
+#endif
 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
 static struct net_device_stats *igb_get_stats(struct net_device *);
 static int igb_change_mtu(struct net_device *, int);
 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
 static struct net_device_stats *igb_get_stats(struct net_device *);
 static int igb_change_mtu(struct net_device *, int);
@@ -2806,6 +2815,12 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        /* Check if Media Autosense is enabled */
        if (hw->mac.type == e1000_82580)
                igb_init_mas(adapter);
        /* Check if Media Autosense is enabled */
        if (hw->mac.type == e1000_82580)
                igb_init_mas(adapter);
+#ifdef HAVE_TIMER_SETUP
+       timer_setup(&adapter->watchdog_timer, &igb_watchdog, 0);
+       if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
+               timer_setup(&adapter->dma_err_timer, &igb_dma_err_timer, 0);
+       timer_setup(&adapter->phy_info_timer, &igb_update_phy_info, 0);
+#else
        setup_timer(&adapter->watchdog_timer, &igb_watchdog,
                    (unsigned long) adapter);
        if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
@@ -2813,6 +2828,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
                            (unsigned long) adapter);
        setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
                    (unsigned long) adapter);
+#endif
 
        INIT_WORK(&adapter->reset_task, igb_reset_task);
        INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
@@ -4543,9 +4559,15 @@ static void igb_spoof_check(struct igb_adapter *adapter)
 
 /* Need to wait a few seconds after link up to get diagnostic information from
  * the phy */
+#ifdef HAVE_TIMER_SETUP
+static void igb_update_phy_info(struct timer_list *t)
+{
+       struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+#else
 static void igb_update_phy_info(unsigned long data)
 {
        struct igb_adapter *adapter = (struct igb_adapter *) data;
+#endif
        e1000_get_phy_info(&adapter->hw);
 }
 
@@ -4594,9 +4616,15 @@ bool igb_has_link(struct igb_adapter *adapter)
  * igb_watchdog - Timer Call-back
  * @data: pointer to adapter cast into an unsigned long
  **/
+#ifdef HAVE_TIMER_SETUP
+static void igb_watchdog(struct timer_list *t)
+{
+       struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+#else
 static void igb_watchdog(unsigned long data)
 {
        struct igb_adapter *adapter = (struct igb_adapter *)data;
+#endif
        /* Do the rest outside of interrupt context */
        schedule_work(&adapter->watchdog_task);
 }
@@ -4854,9 +4882,15 @@ dma_timer_reset:
  * igb_dma_err_timer - Timer Call-back
  * @data: pointer to adapter cast into an unsigned long
  **/
+#ifdef HAVE_TIMER_SETUP
+static void igb_dma_err_timer(struct timer_list *t)
+{
+       struct igb_adapter *adapter = from_timer(adapter, t, dma_err_timer);
+#else
 static void igb_dma_err_timer(unsigned long data)
 {
        struct igb_adapter *adapter = (struct igb_adapter *)data;
+#endif
        /* Do the rest outside of interrupt context */
        schedule_work(&adapter->dma_err_task);
 }
@@ -10051,6 +10085,12 @@ int igb_kni_probe(struct pci_dev *pdev,
                igb_init_mas(adapter);
 
 #ifdef NO_KNI
+#ifdef HAVE_TIMER_SETUP
+       timer_setup(&adapter->watchdog_timer, &igb_watchdog, 0);
+       if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
+               timer_setup(&adapter->dma_err_timer, &igb_dma_err_timer, 0);
+       timer_setup(&adapter->phy_info_timer, &igb_update_phy_info, 0);
+#else
        setup_timer(&adapter->watchdog_timer, &igb_watchdog,
                    (unsigned long) adapter);
        if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
@@ -10058,6 +10098,7 @@ int igb_kni_probe(struct pci_dev *pdev,
                            (unsigned long) adapter);
        setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
                    (unsigned long) adapter);
+#endif
 
        INIT_WORK(&adapter->reset_task, igb_reset_task);
        INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
index aea253b..88bd18e 100644
@@ -3937,4 +3937,8 @@ skb_set_hash(struct sk_buff *skb, __u32 hash, __always_unused int type)
 #define HAVE_PCI_ENABLE_MSIX
 #endif
 
+#if defined(timer_setup) && defined(from_timer)
+#define HAVE_TIMER_SETUP
+#endif
+
 #endif /* _KCOMPAT_H_ */
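
For context: Linux 4.15 replaced the setup_timer()/unsigned-long callback
convention with timer_setup() and from_timer(), which is what the
HAVE_TIMER_SETUP probe above detects. A minimal sketch of the two styles the
igb hunks bridge (struct my_adapter and poll_cb are hypothetical names, not
taken from the patch):

    #include <linux/timer.h>

    struct my_adapter {
            struct timer_list poll_timer;
    };

    #ifdef HAVE_TIMER_SETUP
    /* 4.15+ style: the callback receives the timer itself and recovers its
     * container; registered with timer_setup(&a->poll_timer, poll_cb, 0). */
    static void poll_cb(struct timer_list *t)
    {
            struct my_adapter *a = from_timer(a, t, poll_timer);
            /* ... use a ... */
    }
    #else
    /* Legacy style: the callback receives an opaque unsigned long; registered
     * with setup_timer(&a->poll_timer, poll_cb, (unsigned long)a). */
    static void poll_cb(unsigned long data)
    {
            struct my_adapter *a = (struct my_adapter *)data;
            /* ... use a ... */
    }
    #endif
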
index ea54525..4679dc6 100644
@@ -94,6 +94,7 @@ static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
+       {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
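
The new entry publishes the existing imissed counter under an xstats name.
A short usage sketch reading the same counter through the generic stats API
(assumes port_id refers to a started port):

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    static void print_missed(uint8_t port_id)
    {
            struct rte_eth_stats stats;

            rte_eth_stats_get(port_id, &stats);
            printf("rx_missed_errors (imissed): %" PRIu64 "\n", stats.imissed);
    }
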
index 11ec1fa..dc6d0cc 100644
@@ -262,17 +262,17 @@ __extension__
 struct rte_eth_link {
        uint32_t link_speed;        /**< ETH_SPEED_NUM_ */
        uint16_t link_duplex  : 1;  /**< ETH_LINK_[HALF/FULL]_DUPLEX */
-       uint16_t link_autoneg : 1;  /**< ETH_LINK_SPEED_[AUTONEG/FIXED] */
+       uint16_t link_autoneg : 1;  /**< ETH_LINK_[AUTONEG/FIXED] */
        uint16_t link_status  : 1;  /**< ETH_LINK_[DOWN/UP] */
 } __attribute__((aligned(8)));      /**< aligned for atomic64 read/write */
 
 /* Utility constants */
-#define ETH_LINK_HALF_DUPLEX    0 /**< Half-duplex connection. */
-#define ETH_LINK_FULL_DUPLEX    1 /**< Full-duplex connection. */
-#define ETH_LINK_DOWN           0 /**< Link is down. */
-#define ETH_LINK_UP             1 /**< Link is up. */
-#define ETH_LINK_FIXED          0 /**< No autonegotiation. */
-#define ETH_LINK_AUTONEG        1 /**< Autonegotiated. */
+#define ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
+#define ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
+#define ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
+#define ETH_LINK_UP          1 /**< Link is up (see link_status). */
+#define ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
+#define ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
 
 /**
  * A structure used to configure the ring threshold registers of an RX/TX
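
The constants themselves are unchanged; the comments now name the
rte_eth_link bitfield each macro pairs with. A small sketch of how the
fields are typically read (assumes an initialized port):

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void show_link(uint8_t port_id)
    {
            struct rte_eth_link link;

            rte_eth_link_get_nowait(port_id, &link);
            if (link.link_status == ETH_LINK_UP)
                    printf("port %u: %u Mbps, %s duplex, autoneg %u\n",
                           (unsigned)port_id, link.link_speed,
                           link.link_duplex == ETH_LINK_FULL_DUPLEX ?
                                   "full" : "half",
                           (unsigned)link.link_autoneg);
    }
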
@@ -1694,7 +1694,7 @@ struct rte_eth_dev_data {
        enum rte_kernel_driver kdrv;    /**< Kernel driver passthrough */
        int numa_node;  /**< NUMA node connection */
        const char *drv_name;   /**< Driver name */
-};
+} __rte_cache_aligned;
 
 /** Device supports hotplug detach */
 #define RTE_ETH_DEV_DETACHABLE   0x0001
@@ -1965,7 +1965,7 @@ int rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
  *   the DMA memory allocated for the transmit descriptors of the ring.
  * @param tx_conf
  *   The pointer to the configuration data to be used for the transmit queue.
- *   NULL value is allowed, in which case default RX configuration
+ *   NULL value is allowed, in which case default TX configuration
  *   will be used.
  *   The *tx_conf* structure contains the following data:
  *   - The *tx_thresh* structure with the values of the Prefetch, Host, and
index 978ac60..56fc8b0 100644
@@ -908,7 +908,7 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
                 */
 
                struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
-                       { .group_idx = (uint8_t)tbl8_group_index, },
+                       .group_idx = (uint8_t)tbl8_group_index,
                        .valid = VALID,
                        .valid_group = 1,
                        .depth = 0,
@@ -954,7 +954,7 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
                 */
 
                struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
-                               { .group_idx = (uint8_t)tbl8_group_index, },
+                               .group_idx = (uint8_t)tbl8_group_index,
                                .valid = VALID,
                                .valid_group = 1,
                                .depth = 0,
@@ -1361,7 +1361,7 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
                 */
 
                struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
-                       {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
+                       .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
                        .valid = VALID,
                        .valid_group = 0,
                        .depth = sub_rule_depth,
@@ -1664,7 +1664,7 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
        } else if (tbl8_recycle_index > -1) {
                /* Update tbl24 entry. */
                struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
-                       { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
+                       .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
                        .valid = VALID,
                        .valid_group = 0,
                        .depth = lpm->tbl8[tbl8_recycle_index].depth,
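
All four hunks make the same fix: the entry's first member lives in an
anonymous union, and wrapping its designated initializer in an extra pair of
braces while designating the remaining fields is rejected by some compilers.
A standalone illustration (struct entry is a hypothetical stand-in for
rte_lpm_tbl_entry_v20):

    #include <stdint.h>

    struct entry {
            union {
                    uint8_t next_hop;
                    uint8_t group_idx;
            };
            uint8_t valid       : 1;
            uint8_t valid_group : 1;
            uint8_t depth       : 6;
    };

    /* designate the anonymous-union member directly, without inner braces */
    struct entry e = {
            .group_idx = 3,
            .valid = 1,
            .valid_group = 1,
            .depth = 0,
    };
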
index 8a814df..bc015d0 100644
@@ -1233,13 +1233,14 @@ rte_pktmbuf_free_seg(struct rte_mbuf *m)
  * segment is added back into its original mempool.
  *
  * @param m
- *   The packet mbuf to be freed.
+ *   The packet mbuf to be freed. If NULL, the function does nothing.
  */
 static inline void rte_pktmbuf_free(struct rte_mbuf *m)
 {
        struct rte_mbuf *m_next;
 
-       __rte_mbuf_sanity_check(m, 1);
+       if (m != NULL)
+               __rte_mbuf_sanity_check(m, 1);
 
        while (m != NULL) {
                m_next = m->next;
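
With this change rte_pktmbuf_free() accepts NULL and does nothing, mirroring
free(3), so error paths no longer need their own guard. A short sketch
(pool is assumed to be a valid mbuf mempool):

    #include <rte_mbuf.h>

    static void try_alloc(struct rte_mempool *pool)
    {
            struct rte_mbuf *m = rte_pktmbuf_alloc(pool);

            /* ... any failure path can now fall through unconditionally ... */
            rte_pktmbuf_free(m);    /* safe no-op when m == NULL */
    }
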
@@ -1361,12 +1362,10 @@ static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
  */
 static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
 {
-       struct rte_mbuf *m2 = (struct rte_mbuf *)m;
-
        __rte_mbuf_sanity_check(m, 1);
-       while (m2->next != NULL)
-               m2 = m2->next;
-       return m2;
+       while (m->next != NULL)
+               m = m->next;
+       return m;
 }
 
 /**
index e64bf59..1a5147a 100644
@@ -582,7 +582,7 @@ rte_pdump_init(const char *path)
        if (ret != 0) {
                RTE_LOG(ERR, PDUMP,
                        "Failed to create the pdump thread:%s, %s:%d\n",
-                       strerror(errno), __func__, __LINE__);
+                       strerror(ret), __func__, __LINE__);
                return -1;
        }
        /* Set thread_name for aid in debugging. */
@@ -605,7 +605,7 @@ rte_pdump_uninit(void)
        if (ret != 0) {
                RTE_LOG(ERR, PDUMP,
                        "Failed to cancel the pdump thread:%s, %s:%d\n",
-                       strerror(errno), __func__, __LINE__);
+                       strerror(ret), __func__, __LINE__);
                return -1;
        }
 
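
The underlying bug in both hunks: pthread_create() and pthread_cancel()
return the error number directly and leave errno untouched, so
strerror(errno) could report an unrelated error. A minimal sketch of the
correct convention:

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static void *worker(void *arg) { (void)arg; return NULL; }

    static int spawn(pthread_t *tid)
    {
            int ret = pthread_create(tid, NULL, worker, NULL);

            if (ret != 0)   /* ret itself is the error code */
                    fprintf(stderr, "pthread_create: %s\n", strerror(ret));
            return ret;
    }
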
index 84e0595..a95fbfb 100644
@@ -438,7 +438,7 @@ vhost_user_reconnect_init(void)
 
        ret = pthread_create(&reconn_tid, NULL,
                             vhost_user_client_reconnect, NULL);
-       if (ret < 0)
+       if (ret != 0)
                RTE_LOG(ERR, VHOST_CONFIG, "failed to create reconnect thread");
 
        return ret;
@@ -525,7 +525,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
        if ((flags & RTE_VHOST_USER_CLIENT) != 0) {
                vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
                if (vsocket->reconnect && reconn_tid == 0) {
-                       if (vhost_user_reconnect_init() < 0) {
+                       if (vhost_user_reconnect_init() != 0) {
                                free(vsocket->path);
                                free(vsocket);
                                goto out;
index 3c3f6a4..36fdfb5 100644
@@ -202,6 +202,8 @@ alloc_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
        dev->virtqueue[virt_tx_q_idx] = virtqueue + VIRTIO_TXQ;
 
        init_vring_queue_pair(dev, qp_idx);
+       rte_spinlock_init(&dev->virtqueue[virt_rx_q_idx]->access_lock);
+       rte_spinlock_init(&dev->virtqueue[virt_tx_q_idx]->access_lock);
 
        dev->virt_qp_nb += 1;
 
index d97df1d..9f60ff8 100644
@@ -91,6 +91,8 @@ struct vhost_virtqueue {
 
        /* Backend value to determine if device should be started/stopped */
        int                     backend;
+       rte_spinlock_t          access_lock;
+
        /* Used to notify the guest (trigger interrupt) */
        int                     callfd;
        /* Currently unused as polling mode is enabled */
index d25e1c0..80348db 100644
@@ -39,6 +39,7 @@
 #include <sys/mman.h>
 #include <sys/types.h>
 #include <sys/stat.h>
+#include <stdbool.h>
 #include <assert.h>
 #ifdef RTE_LIBRTE_VHOST_NUMA
 #include <numaif.h>
@@ -488,6 +489,30 @@ dump_guest_pages(struct virtio_net *dev)
 #define dump_guest_pages(dev)
 #endif
 
+static bool
+vhost_memory_changed(struct VhostUserMemory *new,
+                     struct virtio_memory *old)
+{
+       uint32_t i;
+
+       if (new->nregions != old->nregions)
+               return true;
+
+       for (i = 0; i < new->nregions; ++i) {
+               VhostUserMemoryRegion *new_r = &new->regions[i];
+               struct virtio_memory_region *old_r = &old->regions[i];
+
+               if (new_r->guest_phys_addr != old_r->guest_phys_addr)
+                       return true;
+               if (new_r->memory_size != old_r->size)
+                       return true;
+               if (new_r->userspace_addr != old_r->guest_user_addr)
+                       return true;
+       }
+
+       return false;
+}
+
 static int
 vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 {
@@ -500,6 +525,16 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
        uint32_t i;
        int fd;
 
+       if (dev->mem && !vhost_memory_changed(&memory, dev->mem)) {
+               RTE_LOG(INFO, VHOST_CONFIG,
+                       "(%d) memory regions not changed\n", dev->vid);
+
+               for (i = 0; i < memory.nregions; i++)
+                       close(pmsg->fds[i]);
+
+               return 0;
+       }
+
        /* Remove from the data plane. */
        if (dev->flags & VIRTIO_DEV_RUNNING) {
                dev->flags &= ~VIRTIO_DEV_RUNNING;
@@ -917,12 +952,47 @@ send_vhost_message(int sockfd, struct VhostUserMsg *msg)
        return ret;
 }
 
+static void
+vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
+{
+       unsigned int i = 0;
+       unsigned int vq_num = 0;
+
+       while (vq_num < dev->virt_qp_nb * 2) {
+               struct vhost_virtqueue *vq = dev->virtqueue[i];
+
+               if (vq) {
+                       rte_spinlock_lock(&vq->access_lock);
+                       vq_num++;
+               }
+               i++;
+       }
+}
+
+static void
+vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
+{
+       unsigned int i = 0;
+       unsigned int vq_num = 0;
+
+       while (vq_num < dev->virt_qp_nb * 2) {
+               struct vhost_virtqueue *vq = dev->virtqueue[i];
+
+               if (vq) {
+                       rte_spinlock_unlock(&vq->access_lock);
+                       vq_num++;
+               }
+               i++;
+       }
+}
+
 int
 vhost_user_msg_handler(int vid, int fd)
 {
        struct virtio_net *dev;
        struct VhostUserMsg msg;
        int ret;
+       int unlock_required = 0;
 
        dev = get_device(vid);
        if (dev == NULL)
@@ -945,6 +1015,37 @@ vhost_user_msg_handler(int vid, int fd)
 
        RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
                vhost_message_str[msg.request]);
+
+       /*
+        * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE
+        * and VHOST_USER_RESET_OWNER, since they are sent when virtio
+        * stops and the device is destroyed; destroy_device waits for
+        * the queues to become inactive, so it is safe. Otherwise,
+        * taking the access_lock would cause a deadlock.
+        */
+       switch (msg.request) {
+       case VHOST_USER_SET_FEATURES:
+       case VHOST_USER_SET_PROTOCOL_FEATURES:
+       case VHOST_USER_SET_OWNER:
+       case VHOST_USER_SET_MEM_TABLE:
+       case VHOST_USER_SET_LOG_BASE:
+       case VHOST_USER_SET_LOG_FD:
+       case VHOST_USER_SET_VRING_NUM:
+       case VHOST_USER_SET_VRING_ADDR:
+       case VHOST_USER_SET_VRING_BASE:
+       case VHOST_USER_SET_VRING_KICK:
+       case VHOST_USER_SET_VRING_CALL:
+       case VHOST_USER_SET_VRING_ERR:
+       case VHOST_USER_SET_VRING_ENABLE:
+       case VHOST_USER_SEND_RARP:
+               vhost_user_lock_all_queue_pairs(dev);
+               unlock_required = 1;
+               break;
+       default:
+               break;
+       }
+
        switch (msg.request) {
        case VHOST_USER_GET_FEATURES:
                msg.payload.u64 = vhost_user_get_features();
@@ -1034,5 +1135,8 @@ vhost_user_msg_handler(int vid, int fd)
 
        }
 
+       if (unlock_required)
+               vhost_user_unlock_all_queue_pairs(dev);
+
        return 0;
 }
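
Taken together, these hunks serialize the control path against the data
path: the message handler takes every queue's access_lock before handling
state-changing requests, while each burst function holds only its own
queue's lock. A reduced sketch of the data-path side of that protocol
(struct vq is a hypothetical miniature of vhost_virtqueue):

    #include <rte_spinlock.h>

    struct vq {
            rte_spinlock_t access_lock;
            int enabled;
    };

    /* Dequeue-style path: never block the caller. If the control path
     * currently owns the queue, report zero packets and let the caller
     * retry on its next poll. */
    static int burst(struct vq *q)
    {
            int n = 0;

            if (rte_spinlock_trylock(&q->access_lock) == 0)
                    return 0;
            if (q->enabled) {
                    /* ... process the ring, set n to the packet count ... */
            }
            rte_spinlock_unlock(&q->access_lock);
            return n;
    }
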
index 0027f39..0024f72 100644
@@ -44,6 +44,7 @@
 #include <rte_udp.h>
 #include <rte_sctp.h>
 #include <rte_arp.h>
+#include <rte_spinlock.h>
 
 #include "vhost.h"
 
@@ -313,8 +314,11 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
        }
 
        vq = dev->virtqueue[queue_id];
+
+       rte_spinlock_lock(&vq->access_lock);
+
        if (unlikely(vq->enabled == 0))
-               return 0;
+               goto out_access_unlock;
 
        avail_idx = *((volatile uint16_t *)&vq->avail->idx);
        start_idx = vq->last_used_idx;
@@ -322,7 +326,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
        count = RTE_MIN(count, free_entries);
        count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
        if (count == 0)
-               return 0;
+               goto out_access_unlock;
 
        LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n",
                dev->vid, start_idx, start_idx + count);
@@ -388,6 +392,10 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
        if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
                        && (vq->callfd >= 0))
                eventfd_write(vq->callfd, (eventfd_t)1);
+
+out_access_unlock:
+       rte_spinlock_unlock(&vq->access_lock);
+
        return count;
 }
 
@@ -582,12 +590,15 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
        }
 
        vq = dev->virtqueue[queue_id];
+
+       rte_spinlock_lock(&vq->access_lock);
+
        if (unlikely(vq->enabled == 0))
-               return 0;
+               goto out_access_unlock;
 
        count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
        if (count == 0)
-               return 0;
+               goto out_access_unlock;
 
        rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
 
@@ -631,6 +642,9 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
                        eventfd_write(vq->callfd, (eventfd_t)1);
        }
 
+out_access_unlock:
+       rte_spinlock_unlock(&vq->access_lock);
+
        return pkt_idx;
 }
 
@@ -875,7 +889,8 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
                                        desc->addr + desc_offset, cpy_len)))) {
                        cur->data_len = cpy_len;
                        cur->data_off = 0;
-                       cur->buf_addr = (void *)(uintptr_t)desc_addr;
+                       cur->buf_addr = (void *)(uintptr_t)(desc_addr
+                               + desc_offset);
                        cur->buf_physaddr = hpa;
 
                        /*
@@ -1027,6 +1042,22 @@ mbuf_is_consumed(struct rte_mbuf *m)
        return true;
 }
 
+static inline void __attribute__((always_inline))
+restore_mbuf(struct rte_mbuf *m)
+{
+       uint32_t mbuf_size, priv_size;
+
+       while (m) {
+               priv_size = rte_pktmbuf_priv_size(m->pool);
+               mbuf_size = sizeof(struct rte_mbuf) + priv_size;
+               /* start of buffer is after mbuf structure and priv data */
+
+               m->buf_addr = (char *)m + mbuf_size;
+               m->buf_physaddr = rte_mempool_virt2phy(NULL, m) + mbuf_size;
+               m = m->next;
+       }
+}
+
 uint16_t
 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
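
restore_mbuf() undoes the zero-copy rewiring: dequeue zero-copy pointed
buf_addr/buf_physaddr into guest memory, so before a segment returns to its
mempool the default layout has to be rebuilt. A comment-only sketch of that
layout for orientation:

    /*
     * Default layout restored for each segment before rte_pktmbuf_free():
     *
     *   m --> +-----------------+-----------+---------------------+
     *         | struct rte_mbuf | priv data | data buffer         |
     *         +-----------------+-----------+---------------------+
     *                                       ^
     *                  m->buf_addr = (char *)m + sizeof(struct rte_mbuf)
     *                                + rte_pktmbuf_priv_size(m->pool)
     */
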
@@ -1051,9 +1082,13 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        }
 
        vq = dev->virtqueue[queue_id];
-       if (unlikely(vq->enabled == 0))
+
+       if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
                return 0;
 
+       if (unlikely(vq->enabled == 0))
+               goto out_access_unlock;
+
        if (unlikely(dev->dequeue_zero_copy)) {
                struct zcopy_mbuf *zmbuf, *next;
                int nr_updated = 0;
@@ -1069,6 +1104,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
                                nr_updated += 1;
 
                                TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+                               restore_mbuf(zmbuf->mbuf);
                                rte_pktmbuf_free(zmbuf->mbuf);
                                put_zmbuf(zmbuf);
                                vq->nr_zmbuf -= 1;
@@ -1102,7 +1138,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
                if (rarp_mbuf == NULL) {
                        RTE_LOG(ERR, VHOST_DATA,
                                "Failed to allocate memory for mbuf.\n");
-                       return 0;
+                       goto out_access_unlock;
                }
 
                if (make_rarp_packet(rarp_mbuf, &dev->mac)) {
@@ -1116,7 +1152,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        free_entries = *((volatile uint16_t *)&vq->avail->idx) -
                        vq->last_avail_idx;
        if (free_entries == 0)
-               goto out;
+               goto out_access_unlock;
 
        LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
 
@@ -1209,7 +1245,9 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
                update_used_idx(dev, vq, i);
        }
 
-out:
+out_access_unlock:
+       rte_spinlock_unlock(&vq->access_lock);
+
        if (unlikely(rarp_mbuf != NULL)) {
                /*
                 * Inject it to the head of "pkts" array, so that switch's mac
index e2462fd..ea41ebf 100644
@@ -48,7 +48,7 @@ ifeq ("$(origin M)", "command line")
 RTE_EXTMK := $(abspath $(M))
 endif
 endif
-RTE_EXTMK ?= $(RTE_SRCDIR)/Makefile
+RTE_EXTMK ?= $(RTE_SRCDIR)/$(notdir $(firstword $(MAKEFILE_LIST)))
 export RTE_EXTMK
 
 # RTE_SDK_BIN must point to .config, include/ and lib/.
index 638be79..76bab30 100644
@@ -30,7 +30,7 @@
 # OF THE POSSIBILITY OF SUCH DAMAGE.
 
 Name: dpdk
-Version: 16.11.4
+Version: 16.11.5
 Release: 1
 Packager: packaging@6wind.com
 URL: http://dpdk.org
index e4c6d52..e7cfcd0 100755
@@ -126,40 +126,6 @@ def check_output(args, stderr=None):
                             stderr=stderr).communicate()[0]
 
 
-def find_module(mod):
-    '''find the .ko file for kernel module named mod.
-    Searches the $RTE_SDK/$RTE_TARGET directory, the kernel
-    modules directory and finally under the parent directory of
-    the script '''
-    # check $RTE_SDK/$RTE_TARGET directory
-    if 'RTE_SDK' in os.environ and 'RTE_TARGET' in os.environ:
-        path = "%s/%s/kmod/%s.ko" % (os.environ['RTE_SDK'],
-                                     os.environ['RTE_TARGET'], mod)
-        if exists(path):
-            return path
-
-    # check using depmod
-    try:
-        depmod_out = check_output(["modinfo", "-n", mod],
-                                  stderr=subprocess.STDOUT).lower()
-        if "error" not in depmod_out:
-            path = depmod_out.strip()
-            if exists(path):
-                return path
-    except:  # if modinfo can't find module, it fails, so continue
-        pass
-
-    # check for a copy based off current path
-    tools_dir = dirname(abspath(sys.argv[0]))
-    if (tools_dir.endswith("tools")):
-        base_dir = dirname(tools_dir)
-        find_out = check_output(["find", base_dir, "-name", mod + ".ko"])
-        if len(find_out) > 0:  # something matched
-            path = find_out.splitlines()[0]
-            if exists(path):
-                return path
-
-
 def check_modules():
     '''Checks that igb_uio is loaded'''
     global dpdk_drivers