New upstream version 16.11.4 70/9770/1
authorLuca Boccassi <luca.boccassi@gmail.com>
Fri, 8 Dec 2017 17:16:13 +0000 (17:16 +0000)
committerLuca Boccassi <luca.boccassi@gmail.com>
Fri, 8 Dec 2017 17:16:56 +0000 (17:16 +0000)
Change-Id: I733e0292d2e060161d148b3e114065d00b36d2ba
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
108 files changed:
app/test-pmd/cmdline.c
app/test-pmd/config.c
app/test-pmd/parameters.c
app/test-pmd/testpmd.c
app/test/test_link_bonding_mode4.c
app/test/test_memzone.c
app/test/test_pmd_perf.c
buildtools/pmdinfogen/pmdinfogen.c
config/defconfig_arm-armv7a-linuxapp-gcc
doc/guides/rel_notes/release_16_11.rst
drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
drivers/crypto/null/null_crypto_pmd_ops.c
drivers/crypto/openssl/rte_openssl_pmd_ops.c
drivers/crypto/qat/qat_crypto.c
drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
drivers/crypto/zuc/rte_zuc_pmd_ops.c
drivers/net/bnxt/bnxt.h
drivers/net/bnxt/bnxt_cpr.c
drivers/net/bnxt/bnxt_ethdev.c
drivers/net/bnxt/bnxt_hwrm.c
drivers/net/bnxt/bnxt_irq.c
drivers/net/bnxt/bnxt_rxr.c
drivers/net/bnxt/bnxt_rxr.h
drivers/net/bnxt/bnxt_txr.c
drivers/net/bnxt/bnxt_txr.h
drivers/net/bonding/rte_eth_bond_8023ad.c
drivers/net/cxgbe/base/t4_hw.c
drivers/net/e1000/igb_ethdev.c
drivers/net/enic/enic_main.c
drivers/net/i40e/base/i40e_osdep.h
drivers/net/i40e/i40e_ethdev.c
drivers/net/i40e/i40e_ethdev_vf.c
drivers/net/i40e/i40e_pf.c
drivers/net/i40e/i40e_rxtx.c
drivers/net/i40e/i40e_rxtx_vec_neon.c
drivers/net/ixgbe/ixgbe_ethdev.c
drivers/net/ixgbe/ixgbe_pf.c
drivers/net/ixgbe/ixgbe_rxtx.c
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5_ethdev.c
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_rxtx.c
drivers/net/mlx5/mlx5_rxtx.h
drivers/net/mlx5/mlx5_stats.c
drivers/net/mlx5/mlx5_utils.h
drivers/net/nfp/nfp_net.c
drivers/net/pcap/rte_eth_pcap.c
drivers/net/qede/Makefile
drivers/net/qede/base/ecore.h
drivers/net/qede/base/ecore_cxt.c
drivers/net/qede/base/ecore_dcbx.c
drivers/net/qede/base/ecore_dev.c
drivers/net/qede/base/ecore_hw.c
drivers/net/qede/base/ecore_hw.h
drivers/net/qede/base/ecore_mcp.c
drivers/net/qede/base/ecore_sriov.c
drivers/net/qede/base/ecore_vf.c
drivers/net/virtio/virtio_ethdev.c
drivers/net/virtio/virtio_ethdev.h
drivers/net/virtio/virtio_rxtx.c
drivers/net/virtio/virtio_rxtx_simple.c
drivers/net/virtio/virtio_user/vhost_user.c
drivers/net/virtio/virtqueue.c
drivers/net/virtio/virtqueue.h
drivers/net/vmxnet3/vmxnet3_ethdev.c
drivers/net/vmxnet3/vmxnet3_rxtx.c
examples/ipsec-secgw/esp.c
examples/ipsec-secgw/ipip.h
examples/l2fwd-cat/Makefile
examples/l2fwd-cat/cat.c
examples/l2fwd-crypto/main.c
examples/l3fwd-acl/main.c
examples/multi_process/l2fwd_fork/main.c
examples/multi_process/simple_mp/main.c
examples/multi_process/simple_mp/mp_commands.c
examples/multi_process/simple_mp/mp_commands.h
examples/performance-thread/common/lthread.h
examples/performance-thread/common/lthread_sched.c
examples/performance-thread/common/lthread_tls.c
examples/performance-thread/pthread_shim/main.c
examples/qos_sched/init.c
lib/librte_cmdline/cmdline.c
lib/librte_cmdline/cmdline_parse.c
lib/librte_cryptodev/rte_crypto.h
lib/librte_eal/bsdapp/eal/eal_interrupts.c
lib/librte_eal/bsdapp/eal/rte_eal_version.map
lib/librte_eal/common/arch/arm/rte_cpuflags.c
lib/librte_eal/common/arch/ppc_64/rte_cpuflags.c
lib/librte_eal/common/include/arch/x86/rte_atomic_32.h
lib/librte_eal/common/include/rte_version.h
lib/librte_eal/common/malloc_elem.c
lib/librte_eal/linuxapp/eal/eal.c
lib/librte_eal/linuxapp/eal/eal_pci_uio.c
lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c
lib/librte_eal/linuxapp/kni/compat.h
lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c
lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
lib/librte_hash/rte_cuckoo_hash.c
lib/librte_lpm/rte_lpm6.c
lib/librte_net/rte_net.c
lib/librte_pdump/rte_pdump.c
lib/librte_ring/rte_ring.h
lib/librte_timer/rte_timer.c
pkg/dpdk.spec
scripts/auto-config-h.sh
tools/dpdk-devbind.py

index 315a252..f4ff318 100644 (file)
@@ -1568,7 +1568,7 @@ cmd_config_rss_parsed(void *parsed_result,
                        __attribute__((unused)) void *data)
 {
        struct cmd_config_rss *res = parsed_result;
-       struct rte_eth_rss_conf rss_conf;
+       struct rte_eth_rss_conf rss_conf = { .rss_key_len = 0, };
        int diag;
        uint8_t i;
 
index 36c47ab..c50b62e 100644 (file)
@@ -224,8 +224,10 @@ nic_stats_display(portid_t port_id)
        if (diff_cycles > 0)
                diff_cycles = prev_cycles[port_id] - diff_cycles;
 
-       diff_pkts_rx = stats.ipackets - prev_pkts_rx[port_id];
-       diff_pkts_tx = stats.opackets - prev_pkts_tx[port_id];
+       diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
+               (stats.ipackets - prev_pkts_rx[port_id]) : 0;
+       diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
+               (stats.opackets - prev_pkts_tx[port_id]) : 0;
        prev_pkts_rx[port_id] = stats.ipackets;
        prev_pkts_tx[port_id] = stats.opackets;
        mpps_rx = diff_cycles > 0 ?
@@ -1197,7 +1199,7 @@ simple_fwd_config_setup(void)
                fwd_streams[i]->rx_queue  = 0;
                fwd_streams[i]->tx_port   = fwd_ports_ids[j];
                fwd_streams[i]->tx_queue  = 0;
-               fwd_streams[i]->peer_addr = j;
+               fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
                fwd_streams[i]->retry_enabled = retry_enabled;
 
                if (port_topology == PORT_TOPOLOGY_PAIRED) {
@@ -1205,7 +1207,7 @@ simple_fwd_config_setup(void)
                        fwd_streams[j]->rx_queue  = 0;
                        fwd_streams[j]->tx_port   = fwd_ports_ids[i];
                        fwd_streams[j]->tx_queue  = 0;
-                       fwd_streams[j]->peer_addr = i;
+                       fwd_streams[j]->peer_addr = fwd_streams[j]->tx_port;
                        fwd_streams[j]->retry_enabled = retry_enabled;
                }
        }
index 2f96953..0923391 100644 (file)
@@ -360,7 +360,8 @@ parse_portnuma_config(const char *q_arg)
        char s[256];
        const char *p, *p0 = q_arg;
        char *end;
-       uint8_t i,port_id,socket_id;
+       uint8_t i, socket_id;
+       portid_t port_id;
        unsigned size;
        enum fieldnames {
                FLD_PORT = 0,
@@ -390,8 +391,9 @@ parse_portnuma_config(const char *q_arg)
                        if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
                                return -1;
                }
-               port_id = (uint8_t)int_fld[FLD_PORT];
-               if (port_id_is_invalid(port_id, ENABLED_WARN)) {
+               port_id = (portid_t)int_fld[FLD_PORT];
+               if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+                       port_id == (portid_t)RTE_PORT_ALL) {
                        printf("Valid port range is [0");
                        FOREACH_PORT(pid, ports)
                                printf(", %d", pid);
@@ -416,7 +418,8 @@ parse_ringnuma_config(const char *q_arg)
        char s[256];
        const char *p, *p0 = q_arg;
        char *end;
-       uint8_t i,port_id,ring_flag,socket_id;
+       uint8_t i, ring_flag, socket_id;
+       portid_t port_id;
        unsigned size;
        enum fieldnames {
                FLD_PORT = 0,
@@ -450,8 +453,9 @@ parse_ringnuma_config(const char *q_arg)
                        if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
                                return -1;
                }
-               port_id = (uint8_t)int_fld[FLD_PORT];
-               if (port_id_is_invalid(port_id, ENABLED_WARN)) {
+               port_id = (portid_t)int_fld[FLD_PORT];
+               if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+                       port_id == (portid_t)RTE_PORT_ALL) {
                        printf("Valid port range is [0");
                        FOREACH_PORT(pid, ports)
                                printf(", %d", pid);
@@ -806,7 +810,7 @@ launch_args_parse(int argc, char** argv)
                                        port_topology = PORT_TOPOLOGY_LOOP;
                                else
                                        rte_exit(EXIT_FAILURE, "port-topology %s invalid -"
-                                                " must be: paired or chained \n",
+                                                " must be: paired, chained or loop\n",
                                                 optarg);
                        }
                        if (!strcmp(lgopts[opt_idx].name, "forward-mode"))
index ce48ca0..9de01fe 100644 (file)
@@ -1900,8 +1900,8 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
                                1 << (i % vmdq_rx_conf->nb_queue_pools);
                }
                for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-                       vmdq_rx_conf->dcb_tc[i] = i;
-                       vmdq_tx_conf->dcb_tc[i] = i;
+                       vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
+                       vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
                }
 
                /* set DCB mode of RX and TX of multiple queues */
index 292ea3d..3396711 100644 (file)
@@ -659,7 +659,7 @@ bond_handshake(void)
        TEST_ASSERT_EQUAL(all_slaves_done, 1, "Bond handshake failed\n");
 
        /* If flags doesn't match - report failure */
-       return all_slaves_done = 1 ? TEST_SUCCESS : TEST_FAILED;
+       return all_slaves_done == 1 ? TEST_SUCCESS : TEST_FAILED;
 }
 
 #define TEST_LACP_SLAVE_COUT RTE_DIM(test_params.slave_ports)
index 7ae31cf..72cda00 100644 (file)
@@ -176,6 +176,10 @@ test_memzone_reserve_flags(void)
                        printf("hugepage_sz not equal 2M\n");
                        return -1;
                }
+               if (rte_memzone_free(mz)) {
+                       printf("Fail memzone free\n");
+                       return -1;
+               }
 
                mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
                                RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
@@ -187,6 +191,10 @@ test_memzone_reserve_flags(void)
                        printf("hugepage_sz not equal 2M\n");
                        return -1;
                }
+               if (rte_memzone_free(mz)) {
+                       printf("Fail memzone free\n");
+                       return -1;
+               }
 
                /* Check if 1GB huge pages are unavailable, that function fails unless
                 * HINT flag is indicated
@@ -202,6 +210,10 @@ test_memzone_reserve_flags(void)
                                printf("hugepage_sz not equal 2M\n");
                                return -1;
                        }
+                       if (rte_memzone_free(mz)) {
+                               printf("Fail memzone free\n");
+                               return -1;
+                       }
 
                        mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY,
                                        RTE_MEMZONE_1GB);
@@ -224,6 +236,10 @@ test_memzone_reserve_flags(void)
                        printf("hugepage_sz not equal 1G\n");
                        return -1;
                }
+               if (rte_memzone_free(mz)) {
+                       printf("Fail memzone free\n");
+                       return -1;
+               }
 
                mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY,
                                RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
@@ -235,6 +251,10 @@ test_memzone_reserve_flags(void)
                        printf("hugepage_sz not equal 1G\n");
                        return -1;
                }
+               if (rte_memzone_free(mz)) {
+                       printf("Fail memzone free\n");
+                       return -1;
+               }
 
                /* Check if 1GB huge pages are unavailable, that function fails unless
                 * HINT flag is indicated
@@ -250,12 +270,20 @@ test_memzone_reserve_flags(void)
                                printf("hugepage_sz not equal 1G\n");
                                return -1;
                        }
+                       if (rte_memzone_free(mz)) {
+                               printf("Fail memzone free\n");
+                               return -1;
+                       }
                        mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY,
                                        RTE_MEMZONE_2MB);
                        if (mz != NULL) {
                                printf("MEMZONE FLAG 2MB\n");
                                return -1;
                        }
+                       if (rte_memzone_free(mz)) {
+                               printf("Fail memzone free\n");
+                               return -1;
+                       }
                }
 
                if (hugepage_2MB_avail && hugepage_1GB_avail) {
@@ -285,6 +313,10 @@ test_memzone_reserve_flags(void)
                        printf("hugepage_sz not equal 16M\n");
                        return -1;
                }
+               if (rte_memzone_free(mz)) {
+                       printf("Fail memzone free\n");
+                       return -1;
+               }
 
                mz = rte_memzone_reserve("flag_zone_16M_HINT", size,
                SOCKET_ID_ANY, RTE_MEMZONE_16MB|RTE_MEMZONE_SIZE_HINT_ONLY);
@@ -296,6 +328,10 @@ test_memzone_reserve_flags(void)
                        printf("hugepage_sz not equal 16M\n");
                        return -1;
                }
+               if (rte_memzone_free(mz)) {
+                       printf("Fail memzone free\n");
+                       return -1;
+               }
 
                /* Check if 1GB huge pages are unavailable, that function fails
                 * unless HINT flag is indicated
@@ -312,6 +348,10 @@ test_memzone_reserve_flags(void)
                                printf("hugepage_sz not equal 16M\n");
                                return -1;
                        }
+                       if (rte_memzone_free(mz)) {
+                               printf("Fail memzone free\n");
+                               return -1;
+                       }
 
                        mz = rte_memzone_reserve("flag_zone_16G", size,
                                SOCKET_ID_ANY, RTE_MEMZONE_16GB);
@@ -333,6 +373,10 @@ test_memzone_reserve_flags(void)
                        printf("hugepage_sz not equal 16G\n");
                        return -1;
                }
+               if (rte_memzone_free(mz)) {
+                       printf("Fail memzone free\n");
+                       return -1;
+               }
 
                mz = rte_memzone_reserve("flag_zone_16G_HINT", size,
                SOCKET_ID_ANY, RTE_MEMZONE_16GB|RTE_MEMZONE_SIZE_HINT_ONLY);
@@ -344,6 +388,10 @@ test_memzone_reserve_flags(void)
                        printf("hugepage_sz not equal 16G\n");
                        return -1;
                }
+               if (rte_memzone_free(mz)) {
+                       printf("Fail memzone free\n");
+                       return -1;
+               }
 
                /* Check if 1GB huge pages are unavailable, that function fails
                 * unless HINT flag is indicated
@@ -360,6 +408,10 @@ test_memzone_reserve_flags(void)
                                printf("hugepage_sz not equal 16G\n");
                                return -1;
                        }
+                       if (rte_memzone_free(mz)) {
+                               printf("Fail memzone free\n");
+                               return -1;
+                       }
                        mz = rte_memzone_reserve("flag_zone_16M", size,
                                SOCKET_ID_ANY, RTE_MEMZONE_16MB);
                        if (mz != NULL) {
@@ -434,6 +486,12 @@ test_memzone_reserve_max(void)
                rte_memzone_dump(stdout);
                return -1;
        }
+
+       if (rte_memzone_free(mz)) {
+               printf("Fail memzone free\n");
+               return -1;
+       }
+
        return 0;
 }
 
@@ -473,6 +531,12 @@ test_memzone_reserve_max_aligned(void)
                rte_memzone_dump(stdout);
                return -1;
        }
+
+       if (rte_memzone_free(mz)) {
+               printf("Fail memzone free\n");
+               return -1;
+       }
+
        return 0;
 }
 
@@ -593,6 +657,28 @@ test_memzone_aligned(void)
        if (is_memory_overlap(memzone_aligned_512->phys_addr, memzone_aligned_512->len,
                                        memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
                return -1;
+
+       /* free all used zones */
+       if (rte_memzone_free(memzone_aligned_32)) {
+               printf("Fail memzone free\n");
+               return -1;
+       }
+       if (rte_memzone_free(memzone_aligned_128)) {
+               printf("Fail memzone free\n");
+               return -1;
+       }
+       if (rte_memzone_free(memzone_aligned_256)) {
+               printf("Fail memzone free\n");
+               return -1;
+       }
+       if (rte_memzone_free(memzone_aligned_512)) {
+               printf("Fail memzone free\n");
+               return -1;
+       }
+       if (rte_memzone_free(memzone_aligned_1024)) {
+               printf("Fail memzone free\n");
+               return -1;
+       }
        return 0;
 }
 
@@ -638,6 +724,11 @@ check_memzone_bounded(const char *name, uint32_t len,  uint32_t align,
                return -1;
        }
 
+       if (rte_memzone_free(mz)) {
+               printf("Fail memzone free\n");
+               return -1;
+       }
+
        return 0;
 }
 
@@ -758,7 +849,7 @@ test_memzone_free(void)
 }
 
 static int
-test_memzone(void)
+test_memzone_basic(void)
 {
        const struct rte_memzone *memzone1;
        const struct rte_memzone *memzone2;
@@ -837,6 +928,40 @@ test_memzone(void)
        if (mz != NULL)
                return -1;
 
+       if (rte_memzone_free(memzone1)) {
+               printf("Fail memzone free - memzone1\n");
+               return -1;
+       }
+       if (rte_memzone_free(memzone2)) {
+               printf("Fail memzone free - memzone2\n");
+               return -1;
+       }
+       if (memzone3 && rte_memzone_free(memzone3)) {
+               printf("Fail memzone free - memzone3\n");
+               return -1;
+       }
+       if (rte_memzone_free(memzone4)) {
+               printf("Fail memzone free - memzone4\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+static int memzone_calk_called;
+static void memzone_walk_clb(const struct rte_memzone *mz __rte_unused,
+                            void *arg __rte_unused)
+{
+       memzone_calk_called = 1;
+}
+
+static int
+test_memzone(void)
+{
+       printf("test basic memzone API\n");
+       if (test_memzone_basic() < 0)
+               return -1;
+
        printf("test free memzone\n");
        if (test_memzone_free() < 0)
                return -1;
@@ -869,6 +994,14 @@ test_memzone(void)
        if (test_memzone_reserve_max_aligned() < 0)
                return -1;
 
+       printf("check memzone cleanup\n");
+       rte_memzone_walk(memzone_walk_clb, NULL);
+       if (memzone_calk_called) {
+               printf("there are some memzones left after test\n");
+               rte_memzone_dump(stdout);
+               return -1;
+       }
+
        return 0;
 }
 
index 1ffd65a..afab180 100644 (file)
@@ -572,6 +572,7 @@ poll_burst(void *args)
        unsigned i, portid, nb_rx = 0;
        uint64_t total;
        uint64_t timeout = MAX_IDLE;
+       int num[RTE_MAX_ETHPORTS];
 
        lcore_id = rte_lcore_id();
        conf = &lcore_conf[lcore_id];
@@ -591,6 +592,7 @@ poll_burst(void *args)
        for (i = 0; i < conf->nb_ports; i++) {
                portid = conf->portlist[i];
                next[portid] = i * pkt_per_port;
+               num[portid] = pkt_per_port;
        }
 
        while (!rte_atomic64_read(&start))
@@ -601,8 +603,8 @@ poll_burst(void *args)
                for (i = 0; i < conf->nb_ports; i++) {
                        portid = conf->portlist[i];
                        nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
-                                                &pkts_burst[next[portid]],
-                                                MAX_PKT_BURST);
+                                       &pkts_burst[next[portid]],
+                                       RTE_MIN(MAX_PKT_BURST, num[portid]));
                        if (unlikely(nb_rx == 0)) {
                                timeout--;
                                if (unlikely(timeout == 0))
@@ -610,6 +612,7 @@ poll_burst(void *args)
                                continue;
                        }
                        next[portid] += nb_rx;
+                       num[portid] -= nb_rx;
                        total -= nb_rx;
                }
        }
@@ -618,7 +621,6 @@ timeout:
 
        printf("%"PRIu64" packets lost, IDLE %"PRIu64" times\n",
               total, MAX_IDLE - timeout);
-
        /* clean up */
        total = pkt_per_port * conf->nb_ports - total;
        for (i = 0; i < total; i++)
@@ -644,7 +646,7 @@ exec_burst(uint32_t flags, int lcore)
        conf = &lcore_conf[lcore];
 
        pkt_per_port = MAX_TRAFFIC_BURST;
-       num = pkt_per_port;
+       num = pkt_per_port * conf->nb_ports;
 
        rte_atomic64_init(&start);
 
@@ -661,11 +663,12 @@ exec_burst(uint32_t flags, int lcore)
                nb_tx = RTE_MIN(MAX_PKT_BURST, num);
                for (i = 0; i < conf->nb_ports; i++) {
                        portid = conf->portlist[i];
-                       rte_eth_tx_burst(portid, 0,
+                       nb_tx = rte_eth_tx_burst(portid, 0,
                                         &tx_burst[idx], nb_tx);
                        idx += nb_tx;
+                       num -= nb_tx;
                }
-               num -= nb_tx;
+
        }
 
        sleep(5);
index 5bf08ce..df10a2f 100644 (file)
@@ -326,6 +326,10 @@ static int locate_pmd_entries(struct elf_info *info)
 
        do {
                new = calloc(sizeof(struct pmd_driver), 1);
+               if (new == NULL) {
+                       fprintf(stderr, "Failed to calloc memory\n");
+                       return -1;
+               }
                new->name_sym = find_sym_in_symtab(info, "this_pmd_name", last);
                last = new->name_sym;
                if (!new->name_sym)
@@ -407,7 +411,8 @@ int main(int argc, char **argv)
        }
        parse_elf(&info, argv[1]);
 
-       locate_pmd_entries(&info);
+       if (locate_pmd_entries(&info) < 0)
+               exit(1);
 
        if (info.drivers) {
                output_pmd_info_string(&info, argv[2]);
index bde6acd..b634f52 100644 (file)
@@ -73,5 +73,5 @@ CONFIG_RTE_LIBRTE_MLX4_PMD=n
 CONFIG_RTE_LIBRTE_MPIPE_PMD=n
 CONFIG_RTE_LIBRTE_VMXNET3_PMD=n
 CONFIG_RTE_LIBRTE_PMD_XENVIRT=n
-CONFIG_RTE_LIBRTE_PMD_BNX2X=n
+CONFIG_RTE_LIBRTE_BNX2X_PMD=n
 CONFIG_RTE_LIBRTE_QEDE_PMD=n
index 45cdbf6..ec6c661 100644 (file)
@@ -913,3 +913,118 @@ Fixes in 16.11 LTS Release
 * vhost: fix IP checksum
 * vhost: fix TCP checksum
 * vhost: make page logging atomic
+
+16.11.4
+~~~~~~~
+
+* app/testpmd: fix forwarding between non consecutive ports
+* app/testpmd: fix invalid port id parameters
+* app/testpmd: fix mapping of user priority to DCB TC
+* app/testpmd: fix packet throughput after stats reset
+* app/testpmd: fix RSS structure initialisation
+* app/testpmd: fix topology error message
+* buildtools: check allocation error in pmdinfogen
+* buildtools: fix icc build
+* cmdline: fix compilation with -Og
+* cmdline: fix warning for unused return value
+* config: fix bnx2x option for armv7a
+* cryptodev: fix build with -Ofast
+* crypto/qat: fix SHA512-HMAC supported key size
+* drivers/crypto: use snprintf return value correctly
+* eal/bsd: fix missing interrupt stub functions
+* eal: copy raw strings taken from command line
+* eal: fix auxv open check for ARM and PPC
+* eal/x86: fix atomic cmpset
+* examples/ipsec-secgw: fix IPv6 payload length
+* examples/ipsec-secgw: fix IP version check
+* examples/l2fwd-cat: fix build with PQOS 1.4
+* examples/l2fwd-crypto: fix uninitialized errno value
+* examples/l2fwd_fork: fix message pool init
+* examples/l3fwd-acl: check fseek return
+* examples/multi_process: fix received message length
+* examples/performance-thread: check thread creation
+* examples/performance-thread: fix out-of-bounds sched array
+* examples/performance-thread: fix out-of-bounds tls array
+* examples/qos_sched: fix uninitialized config
+* hash: fix eviction counter
+* kni: fix build on RHEL 7.4
+* kni: fix build on SLE12 SP3
+* kni: fix ethtool build with kernel 4.11
+* lpm6: fix compilation with -Og
+* mem: fix malloc element free in debug mode
+* net/bnxt: fix a bit shift operation
+* net/bnxt: fix an issue with broadcast traffic
+* net/bnxt: fix a potential null pointer dereference
+* net/bnxt: fix interrupt handler
+* net/bnxt: fix link handling and configuration
+* net/bnxt: fix Rx offload capability
+* net/bnxt: fix Tx offload capability
+* net/bnxt: set checksum offload flags correctly
+* net/bnxt: update status of Rx IP/L4 CKSUM
+* net/bonding: fix LACP slave deactivate behavioral
+* net/cxgbe: fix memory leak
+* net/enic: fix assignment
+* net/enic: fix packet loss after MTU change
+* net/enic: fix possible null pointer dereference
+* net: fix inner L2 length in packet type parser
+* net/i40e/base: fix bool definition
+* net/i40e: fix clear xstats bug in VF
+* net/i40e: fix flexible payload configuration
+* net/i40e: fix flow control watermark mismatch
+* net/i40e: fix i40evf MAC filter table
+* net/i40e: fix mbuf free in vector Tx
+* net/i40e: fix memory leak if VF init fails
+* net/i40e: fix mirror rule reset when port is closed
+* net/i40e: fix mirror with firmware 6.0
+* net/i40e: fix packet count for PF
+* net/i40e: fix PF notify issue when VF is not up
+* net/i40e: fix Rx packets number for NEON
+* net/i40e: fix Rx queue interrupt mapping in VF
+* net/i40e: fix uninitialized variable
+* net/i40e: fix variable assignment
+* net/i40e: fix VF cannot forward packets issue
+* net/i40e: fix VFIO interrupt mapping in VF
+* net/igb: fix memcpy length
+* net/igb: fix Rx interrupt with VFIO and MSI-X
+* net/ixgbe: fix adding a mirror rule
+* net/ixgbe: fix mapping of user priority to TC
+* net/ixgbe: fix PF DCB info
+* net/ixgbe: fix uninitialized variable
+* net/ixgbe: fix VFIO interrupt mapping in VF
+* net/ixgbe: fix VF RX hang
+* net/mlx5: fix clang build
+* net/mlx5: fix clang compilation error
+* net/mlx5: fix link speed bitmasks
+* net/mlx5: fix probe failure report
+* net/mlx5: fix Tx stats error counter definition
+* net/mlx5: fix Tx stats error counter logic
+* net/mlx5: improve stack usage during link update
+* net/nfp: fix RSS
+* net/nfp: fix stats struct initial value
+* net/pcap: fix memory leak in dumper open
+* net/qede/base: fix API return types
+* net/qede/base: fix division by zero
+* net/qede/base: fix for VF malicious indication
+* net/qede/base: fix macros to check chip revision/metal
+* net/qede/base: fix number of app table entries
+* net/qede/base: fix return code to align with FW
+* net/qede/base: fix to use a passed ptt handle
+* net/qede: fix icc build
+* net/virtio: fix compilation with -Og
+* net/virtio: fix mbuf port for simple Rx function
+* net/virtio: fix queue setup consistency
+* net/virtio: fix Tx packet length stats
+* net/virtio: fix untrusted scalar value
+* net/virtio: flush Rx queues on start
+* net/vmxnet3: fix dereference before null check
+* net/vmxnet3: fix MAC address set
+* net/vmxnet3: fix memory leak when releasing queues
+* pdump: fix possible mbuf leak on failure
+* ring: guarantee load/load order in enqueue and dequeue
+* test: fix assignment operation
+* test/memzone: fix memory leak
+* test/pmd_perf: fix crash with multiple devices
+* timer: use 64-bit specific code on more platforms
+* uio: fix compilation with -Og
+* usertools: fix device binding with python 3
+* vfio: fix close unchecked file descriptor
index c51f82a..d4ff651 100644 (file)
@@ -184,7 +184,7 @@ aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
                        "aesni_gcm_pmd_%u_qp_%u",
                        dev->data->dev_id, qp->id);
 
-       if (n > sizeof(qp->name))
+       if (n >= sizeof(qp->name))
                return -1;
 
        return 0;
index 287c8a5..e531c88 100644 (file)
@@ -333,7 +333,7 @@ aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
                        "aesni_mb_pmd_%u_qp_%u",
                        dev->data->dev_id, qp->id);
 
-       if (n > sizeof(qp->name))
+       if (n >= sizeof(qp->name))
                return -1;
 
        return 0;
index b9285a4..8f8695d 100644 (file)
@@ -186,7 +186,7 @@ kasumi_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
                        "kasumi_pmd_%u_qp_%u",
                        dev->data->dev_id, qp->id);
 
-       if (n > sizeof(qp->name))
+       if (n >= sizeof(qp->name))
                return -1;
 
        return 0;
index 26ff631..421f21e 100644 (file)
@@ -178,7 +178,7 @@ null_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
                        "null_crypto_pmd_%u_qp_%u",
                        dev->data->dev_id, qp->id);
 
-       if (n > sizeof(qp->name))
+       if (n >= sizeof(qp->name))
                return -1;
 
        return 0;
index a072e6e..7bf82e1 100644 (file)
@@ -543,7 +543,7 @@ openssl_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
                        "openssl_pmd_%u_qp_%u",
                        dev->data->dev_id, qp->id);
 
-       if (n > sizeof(qp->name))
+       if (n >= sizeof(qp->name))
                return -1;
 
        return 0;
index f7fcece..8b830b8 100644 (file)
@@ -162,7 +162,7 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
                                .key_size = {
                                        .min = 1,
                                        .max = 128,
-                                       .increment = 128
+                                       .increment = 1
                                },
                                .digest_size = {
                                        .min = 64,
index 4602dfd..7cb47c0 100644 (file)
@@ -183,7 +183,7 @@ snow3g_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
                        "snow3g_pmd_%u_qp_%u",
                        dev->data->dev_id, qp->id);
 
-       if (n > sizeof(qp->name))
+       if (n >= sizeof(qp->name))
                return -1;
 
        return 0;
index 2c886d5..620a9da 100644 (file)
@@ -183,7 +183,7 @@ zuc_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
                        "zuc_pmd_%u_qp_%u",
                        dev->data->dev_id, qp->id);
 
-       if (n > sizeof(qp->name))
+       if (n >= sizeof(qp->name))
                return -1;
 
        return 0;
index 4418c7f..ff3c240 100644 (file)
@@ -112,6 +112,8 @@ struct bnxt_link_info {
        uint16_t                auto_link_speed;
        uint16_t                auto_link_speed_mask;
        uint32_t                preemphasis;
+       uint8_t                 phy_type;
+       uint8_t                 media_type;
 };
 
 #define BNXT_COS_QUEUE_COUNT   8
index 3aedcb8..6f88ac9 100644 (file)
@@ -55,7 +55,7 @@ void bnxt_handle_async_event(struct bnxt *bp,
        case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
        case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
        case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
-               bnxt_link_update_op(bp->eth_dev, 0);
+               bnxt_link_update_op(bp->eth_dev, 1);
                break;
        default:
                RTE_LOG(ERR, PMD, "handle_async_event id = 0x%x\n", event_id);
@@ -123,8 +123,10 @@ void bnxt_free_def_cp_ring(struct bnxt *bp)
                return;
 
        bnxt_free_ring(cpr->cp_ring_struct);
+       cpr->cp_ring_struct = NULL;
        rte_free(cpr->cp_ring_struct);
        rte_free(cpr);
+       bp->def_cp_ring = NULL;
 }
 
 /* For the default completion ring only */
index 035fe07..b6feb5a 100644 (file)
@@ -106,6 +106,8 @@ static struct rte_pci_id bnxt_pci_id_map[] = {
        ETH_RSS_NONFRAG_IPV6_TCP |      \
        ETH_RSS_NONFRAG_IPV6_UDP)
 
+static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
+
 /***********************/
 
 /*
@@ -261,6 +263,7 @@ static int bnxt_init_chip(struct bnxt *bp)
                        goto err_out;
                }
        }
+       bnxt_print_link_info(bp->eth_dev);
 
        return 0;
 
@@ -325,8 +328,13 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
        dev_info->min_rx_bufsize = 1;
        dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
                                  + VLAN_TAG_SIZE;
-       dev_info->rx_offload_capa = 0;
-       dev_info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
+       dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+                                       DEV_RX_OFFLOAD_IPV4_CKSUM |
+                                       DEV_RX_OFFLOAD_UDP_CKSUM |
+                                       DEV_RX_OFFLOAD_TCP_CKSUM |
+                                       DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+       dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+                                       DEV_TX_OFFLOAD_IPV4_CKSUM |
                                        DEV_TX_OFFLOAD_TCP_CKSUM |
                                        DEV_TX_OFFLOAD_UDP_CKSUM |
                                        DEV_TX_OFFLOAD_TCP_TSO;
@@ -410,20 +418,6 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
        return 0;
 }
 
-static inline int
-rte_bnxt_atomic_write_link_status(struct rte_eth_dev *eth_dev,
-                               struct rte_eth_link *link)
-{
-       struct rte_eth_link *dst = &eth_dev->data->dev_link;
-       struct rte_eth_link *src = link;
-
-       if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-                                       *(uint64_t *)src) == 0)
-               return 1;
-
-       return 0;
-}
-
 static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
 {
        struct rte_eth_link *link = &eth_dev->data->dev_link;
@@ -476,7 +470,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
 
        bnxt_enable_int(bp);
 
-       bnxt_link_update_op(eth_dev, 0);
+       bnxt_link_update_op(eth_dev, 1);
        return 0;
 
 error:
@@ -492,9 +486,14 @@ error:
 static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
 {
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       int rc = 0;
+
+       if (!bp->link_info.link_up)
+               rc = bnxt_set_hwrm_link_config(bp, true);
+       if (!rc)
+               eth_dev->data->dev_link.link_status = 1;
 
-       eth_dev->data->dev_link.link_status = 1;
-       bnxt_set_hwrm_link_config(bp, true);
+       bnxt_print_link_info(eth_dev);
        return 0;
 }
 
@@ -504,6 +503,8 @@ static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
 
        eth_dev->data->dev_link.link_status = 0;
        bnxt_set_hwrm_link_config(bp, false);
+       bp->link_info.link_up = 0;
+
        return 0;
 }
 
@@ -550,13 +551,14 @@ static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
        uint64_t pool_mask = eth_dev->data->mac_pool_sel[index];
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter, *temp_filter;
-       int i;
+       uint32_t pool = RTE_MIN(MAX_FF_POOLS, ETH_64_POOLS);
+       uint32_t i;
 
        /*
         * Loop through all VNICs from the specified filter flow pools to
         * remove the corresponding MAC addr filter
         */
-       for (i = 0; i < MAX_FF_POOLS; i++) {
+       for (i = 0; i < pool; i++) {
                if (!(pool_mask & (1ULL << i)))
                        continue;
 
@@ -645,7 +647,8 @@ out:
        /* Timed out or success */
        if (new.link_status != eth_dev->data->dev_link.link_status ||
        new.link_speed != eth_dev->data->dev_link.link_speed) {
-               rte_bnxt_atomic_write_link_status(eth_dev, &new);
+               memcpy(&eth_dev->data->dev_link, &new,
+                       sizeof(struct rte_eth_link));
                bnxt_print_link_info(eth_dev);
        }
 
index 93910d8..619bc97 100644 (file)
@@ -177,8 +177,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
                mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
-       req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
-                                   mask);
+       req.mask = rte_cpu_to_le_32(mask);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
@@ -482,34 +481,38 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;
-       uint32_t link_speed_mask =
-               HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
 
        HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
 
        if (conf->link_up) {
+               /* Setting Fixed Speed. But AutoNeg is ON, So disable it */
+               if (bp->link_info.auto_mode && conf->link_speed) {
+                       req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
+                       RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
+               }
+
                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
+               enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
-                       req.auto_mode = conf->auto_mode;
-                       enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
-                       if (conf->auto_mode ==
-                           HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
-                               req.auto_link_speed_mask =
-                                       conf->auto_link_speed_mask;
-                               enables |= link_speed_mask;
-                       }
-                       if (bp->link_info.auto_link_speed) {
-                               req.auto_link_speed =
-                                       bp->link_info.auto_link_speed;
-                               enables |=
-                               HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
-                       }
+                       /* No speeds specified. Enable AutoNeg - all speeds */
+                       req.auto_mode =
+                               HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
+               }
+               /* AutoNeg - Advertise speeds specified. */
+               if (conf->auto_link_speed_mask) {
+                       req.auto_mode =
+                               HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
+                       req.auto_link_speed_mask =
+                               conf->auto_link_speed_mask;
+                       enables |=
+                       HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }
+
                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
@@ -557,6 +560,8 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
+       link_info->phy_type = resp->phy_type;
+       link_info->media_type = resp->media_type;
 
        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
@@ -1275,6 +1280,11 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
        return hw_link_duplex;
 }
 
+static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
+{
+       return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
+}
+
 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
 {
        uint16_t eth_link_speed = 0;
@@ -1483,7 +1493,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
        int rc = 0;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_link_info link_req;
-       uint16_t speed;
+       uint16_t speed, autoneg;
 
        if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
                return 0;
@@ -1498,20 +1508,28 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
        if (!link_up)
                goto port_phy_cfg;
 
+       autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
        speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
        link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
-       if (speed == 0) {
+       if (autoneg == 1) {
                link_req.phy_flags |=
                                HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
-               link_req.auto_mode =
-                               HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                link_req.auto_link_speed_mask =
                        bnxt_parse_eth_link_speed_mask(bp,
                                                       dev_conf->link_speeds);
        } else {
+               if (bp->link_info.phy_type ==
+                   HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
+                   bp->link_info.phy_type ==
+                   HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
+                   bp->link_info.media_type ==
+                   HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
+                       RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
+                       return -EINVAL;
+               }
+
                link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
                link_req.link_speed = speed;
-               RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
        }
        link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
        link_req.auto_pause = bp->link_info.auto_pause;
index e93585a..851f39c 100644 (file)
@@ -51,11 +51,18 @@ static void bnxt_int_handler(struct rte_intr_handle *handle __rte_unused,
        struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
        struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
-       uint32_t raw_cons = cpr->cp_raw_cons;
-       uint32_t cons;
        struct cmpl_base *cmp;
+       uint32_t raw_cons;
+       uint32_t cons;
 
+       if (cpr == NULL)
+               return;
+
+       raw_cons = cpr->cp_raw_cons;
        while (1) {
+               if (!cpr || !cpr->cp_ring_struct)
+                       return;
+
                cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
                cmp = &cpr->cp_desc_ring[cons];
 
index 5d93de2..980f3ec 100644 (file)
@@ -148,6 +148,17 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
        }
 
        rx_buf->mbuf = NULL;
+
+       if (likely(RX_CMP_IP_CS_OK(rxcmp1)))
+               mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+       else
+               mbuf->ol_flags |= PKT_RX_IP_CKSUM_NONE;
+
+       if (likely(RX_CMP_L4_CS_OK(rxcmp1)))
+               mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+       else
+               mbuf->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+
        if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
                /* Re-install the mbuf back to the rx ring */
                bnxt_reuse_rx_mbuf(rxr, cons, mbuf);
index f766b26..111a213 100644 (file)
 #define B_RX_DB(db, prod)                                              \
                (*(uint32_t *)db = (DB_KEY_RX | prod))
 
+#define RX_CMP_L4_CS_BITS      rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
+
+#define RX_CMP_L4_CS_ERR_BITS  rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR)
+
+#define RX_CMP_L4_CS_OK(rxcmp1)                                                \
+           (((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) &&          \
+            !((rxcmp1)->errors_v2 & RX_CMP_L4_CS_ERR_BITS))
+
+#define RX_CMP_IP_CS_ERR_BITS  rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR)
+
+#define RX_CMP_IP_CS_BITS      rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
+
+#define RX_CMP_IP_CS_OK(rxcmp1)                                                \
+               (((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) &&      \
+               !((rxcmp1)->errors_v2 & RX_CMP_IP_CS_ERR_BITS))
+
 struct bnxt_sw_rx_bd {
        struct rte_mbuf         *mbuf; /* data associated with RX descriptor */
 };
index 8bf8fee..c2f9ae7 100644 (file)
@@ -161,7 +161,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 
        if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
                                PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
-                               PKT_TX_VLAN_PKT))
+                               PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM))
                long_bd = true;
 
        tx_buf = &txr->tx_buf_ring[txr->tx_prod];
@@ -211,20 +211,38 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 
                if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) {
                        /* TSO */
-                       txbd1->lflags = TX_BD_LONG_LFLAGS_LSO;
+                       txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO;
                        txbd1->hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
                                        tx_pkt->l4_len;
                        txbd1->mss = tx_pkt->tso_segsz;
 
-               } else if (tx_pkt->ol_flags & (PKT_TX_TCP_CKSUM |
-                                       PKT_TX_UDP_CKSUM)) {
+               } else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
+                       /* Outer IP, Inner IP, Inner TCP/UDP CSO */
+                       txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
+                       txbd1->mss = 0;
+               } else if (tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) {
+                       /* (Inner) IP, (Inner) TCP/UDP CSO */
+                       txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
+                       txbd1->mss = 0;
+               } else if (tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) {
+                       /* Outer IP, (Inner) TCP/UDP CSO */
+                       txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
+                       txbd1->mss = 0;
+               } else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) {
+                       /* Outer IP, Inner IP CSO */
+                       txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM;
+                       txbd1->mss = 0;
+               } else if (tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) {
                        /* TCP/UDP CSO */
-                       txbd1->lflags = TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
+                       txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
                        txbd1->mss = 0;
-
                } else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) {
                        /* IP CSO */
-                       txbd1->lflags = TX_BD_LONG_LFLAGS_IP_CHKSUM;
+                       txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
+                       txbd1->mss = 0;
+               } else if (tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+                       /* IP CSO */
+                       txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
                        txbd1->mss = 0;
                }
        } else {
index 4c16101..cb961f1 100644 (file)
@@ -69,4 +69,25 @@ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
 uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                               uint16_t nb_pkts);
 
+#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM   (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
+                                       PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_IIP_TCP_UDP_CKSUM       (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
+                                       PKT_TX_IP_CKSUM)
+#define PKT_TX_OIP_TCP_UDP_CKSUM       (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
+                                       PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_IIP_CKSUM           (PKT_TX_IP_CKSUM |      \
+                                        PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_TCP_UDP_CKSUM           (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)
+
+
+#define TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM        (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \
+                                       TX_BD_LONG_LFLAGS_T_IP_CHKSUM | \
+                                       TX_BD_LONG_LFLAGS_IP_CHKSUM)
+#define TX_BD_FLG_IP_TCP_UDP_CHKSUM    (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \
+                                       TX_BD_LONG_LFLAGS_IP_CHKSUM)
+#define TX_BD_FLG_TIP_IP_CHKSUM                (TX_BD_LONG_LFLAGS_T_IP_CHKSUM | \
+                                       TX_BD_LONG_LFLAGS_IP_CHKSUM)
+#define TX_BD_FLG_TIP_TCP_UDP_CHKSUM   (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \
+                                       TX_BD_LONG_LFLAGS_T_IP_CHKSUM)
+
 #endif
index b4a1e72..8081981 100644 (file)
@@ -934,37 +934,30 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
 }
 
 int
-bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev,
+bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev __rte_unused,
                uint8_t slave_id)
 {
-       struct bond_dev_private *internals = bond_dev->data->dev_private;
        void *pkt = NULL;
-       struct port *port;
-       uint8_t i;
+       struct port *port = NULL;
+       uint8_t old_partner_state;
 
-       /* Given slave must be in active list */
-       RTE_ASSERT(find_slave_by_id(internals->active_slaves,
-       internals->active_slave_count, slave_id) < internals->active_slave_count);
+       port = &mode_8023ad_ports[slave_id];
 
-       /* Exclude slave from transmit policy. If this slave is an aggregator
-        * make all aggregated slaves unselected to force selection logic
-        * to select suitable aggregator for this port. */
-       for (i = 0; i < internals->active_slave_count; i++) {
-               port = &mode_8023ad_ports[internals->active_slaves[i]];
-               if (port->aggregator_port_id != slave_id)
-                       continue;
+       ACTOR_STATE_CLR(port, AGGREGATION);
+       port->selected = UNSELECTED;
 
-               port->selected = UNSELECTED;
+       old_partner_state = port->partner_state;
+       record_default(port);
 
-               /* Use default aggregator */
-               port->aggregator_port_id = internals->active_slaves[i];
-       }
+       /* If partner timeout state changes then disable timer */
+       if (!((old_partner_state ^ port->partner_state) &
+                       STATE_LACP_SHORT_TIMEOUT))
+               timer_cancel(&port->current_while_timer);
 
-       port = &mode_8023ad_ports[slave_id];
-       port->selected = UNSELECTED;
-       port->actor_state &= ~(STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
-                       STATE_COLLECTING);
+       PARTNER_STATE_CLR(port, AGGREGATION);
+       ACTOR_STATE_CLR(port, EXPIRED);
 
+       /* flush rx/tx rings */
        while (rte_ring_dequeue(port->rx_ring, &pkt) == 0)
                rte_pktmbuf_free((struct rte_mbuf *)pkt);
 
index 19afdac..565a695 100644 (file)
@@ -403,6 +403,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
                        t4_os_atomic_list_del(&entry, &adap->mbox_list,
                                              &adap->mbox_lock);
                        t4_report_fw_error(adap);
+                       free(temp);
                        return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
                }
 
@@ -446,6 +447,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
                                                         &adap->mbox_list,
                                                         &adap->mbox_lock));
                t4_report_fw_error(adap);
+               free(temp);
                return (v == X_MBOWNER_FW ? -EBUSY : -ETIMEDOUT);
        }
 
@@ -546,6 +548,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
                        T4_OS_MBOX_LOCKING(
                                t4_os_atomic_list_del(&entry, &adap->mbox_list,
                                                      &adap->mbox_lock));
+                       free(temp);
                        return -G_FW_CMD_RETVAL((int)res);
                }
        }
index 9cf619f..407021d 100644 (file)
@@ -3839,7 +3839,7 @@ eth_igb_get_flex_filter(struct rte_eth_dev *dev,
        flex_filter.filter_info.priority = filter->priority;
        memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
        memcpy(flex_filter.filter_info.mask, filter->mask,
-                       RTE_ALIGN(filter->len, sizeof(char)) / sizeof(char));
+                       RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT);
 
        it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
                                &flex_filter.filter_info);
@@ -5095,7 +5095,13 @@ eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t mask = 1 << queue_id;
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       uint32_t vec = E1000_MISC_VEC_ID;
+
+       if (rte_intr_allow_others(intr_handle))
+               vec = E1000_RX_VEC_START;
+
+       uint32_t mask = 1 << (queue_id + vec);
 
        E1000_WRITE_REG(hw, E1000_EIMC, mask);
        E1000_WRITE_FLUSH(hw);
@@ -5108,7 +5114,13 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t mask = 1 << queue_id;
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       uint32_t vec = E1000_MISC_VEC_ID;
+
+       if (rte_intr_allow_others(intr_handle))
+               vec = E1000_RX_VEC_START;
+
+       uint32_t mask = 1 << (queue_id + vec);
        uint32_t regval;
 
        regval = E1000_READ_REG(hw, E1000_EIMS);
index 1861a32..b25eff4 100644 (file)
@@ -231,7 +231,7 @@ enic_free_rq_buf(struct rte_mbuf **mbuf)
                return;
 
        rte_pktmbuf_free(*mbuf);
-       mbuf = NULL;
+       *mbuf = NULL;
 }
 
 void enic_init_vnic_resources(struct enic *enic)
@@ -375,6 +375,7 @@ enic_alloc_consistent(void *priv, size_t size,
                pr_err("%s : Failed to allocate memory for memzone list\n",
                       __func__);
                rte_memzone_free(rz);
+               return NULL;
        }
 
        mze->rz = rz;
@@ -1124,11 +1125,12 @@ static int
 enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
 {
        struct vnic_rq *sop_rq, *data_rq;
-       unsigned int cq_idx = enic_cq_rq(enic, rq_idx);
+       unsigned int cq_idx;
        int rc = 0;
 
        sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
        data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
+       cq_idx = rq_idx;
 
        vnic_cq_clean(&enic->cq[cq_idx]);
        vnic_cq_init(&enic->cq[cq_idx],
index 38e7ba5..67f2946 100644 (file)
@@ -35,6 +35,7 @@
 
 #include <string.h>
 #include <stdint.h>
+#include <stdbool.h>
 #include <stdio.h>
 #include <stdarg.h>
 
@@ -56,7 +57,6 @@ typedef uint16_t        u16;
 typedef uint32_t        u32;
 typedef int32_t         s32;
 typedef uint64_t        u64;
-typedef int             bool;
 
 typedef enum i40e_status_code i40e_status;
 #define __iomem
index 65e10f3..0835c2d 100644 (file)
 /* Flow control default timer */
 #define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
 
-/* Flow control default high water */
-#define I40E_DEFAULT_HIGH_WATER (0x1C40/1024)
-
-/* Flow control default low water */
-#define I40E_DEFAULT_LOW_WATER  (0x1A40/1024)
-
 /* Flow control enable fwd bit */
 #define I40E_PRTMAC_FWD_CTRL   0x00000001
 
 /* Kilobytes shift */
 #define I40E_KILOSHIFT 10
 
+/* Flow control default high water */
+#define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)
+
+/* Flow control default low water */
+#define I40E_DEFAULT_LOW_WATER  (0xF2000 >> I40E_KILOSHIFT)
+
 /* Receive Average Packet Size in Byte*/
 #define I40E_PACKET_AVERAGE_SIZE 128
 
@@ -422,6 +422,12 @@ static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
 static void i40e_configure_registers(struct i40e_hw *hw);
 static void i40e_hw_init(struct rte_eth_dev *dev);
 static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
+static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
+                                                    uint16_t seid,
+                                                    uint16_t rule_type,
+                                                    uint16_t *entries,
+                                                    uint16_t count,
+                                                    uint16_t rule_id);
 static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
                        struct rte_eth_mirror_conf *mirror_conf,
                        uint8_t sw_id, uint8_t on);
@@ -723,23 +729,22 @@ RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
 static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
 {
        /*
-        * Initialize registers for flexible payload, which should be set by NVM.
-        * This should be removed from code once it is fixed in NVM.
+        * Force global configuration for flexible payload
+        * to the first 16 bytes of the corresponding L2/L3/L4 paylod.
+        * This should be removed from code once proper
+        * configuration API is added to avoid configuration conflicts
+        * between ports of the same device.
         */
-       I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
-       I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
-       I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
-       I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
-       I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
-       I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
-       I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
-       I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
-       I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
 
-       /* Initialize registers for parsing packet type of QinQ */
+       /*
+        * Initialize registers for parsing packet type of QinQ
+        * This should be removed from code once proper
+        * configuration API is added to avoid configuration conflicts
+        * between ports of the same device.
+        */
        I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
        I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
 }
@@ -1819,7 +1824,6 @@ i40e_dev_stop(struct rte_eth_dev *dev)
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_vsi *main_vsi = pf->main_vsi;
-       struct i40e_mirror_rule *p_mirror;
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        int i;
 
@@ -1845,13 +1849,6 @@ i40e_dev_stop(struct rte_eth_dev *dev)
        /* Set link down */
        i40e_dev_set_link_down(dev);
 
-       /* Remove all mirror rules */
-       while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
-               TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
-               rte_free(p_mirror);
-       }
-       pf->nb_mirror_rule = 0;
-
        if (!rte_intr_allow_others(intr_handle))
                /* resume to the default handler */
                rte_intr_callback_register(intr_handle,
@@ -1871,13 +1868,35 @@ i40e_dev_close(struct rte_eth_dev *dev)
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_mirror_rule *p_mirror;
        uint32_t reg;
        int i;
+       int ret;
 
        PMD_INIT_FUNC_TRACE();
 
        i40e_dev_stop(dev);
        hw->adapter_stopped = 1;
+
+       /* Remove all mirror rules */
+       while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
+               ret = i40e_aq_del_mirror_rule(hw,
+                                             pf->main_vsi->veb->seid,
+                                             p_mirror->rule_type,
+                                             p_mirror->entries,
+                                             p_mirror->num_entries,
+                                             p_mirror->id);
+               if (ret < 0)
+                       PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
+                                   "status = %d, aq_err = %d.", ret,
+                                   hw->aq.asq_last_status);
+
+               /* remove mirror software resource anyway */
+               TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
+               rte_free(p_mirror);
+               pf->nb_mirror_rule--;
+       }
+
        i40e_dev_free_queues(dev);
 
        /* Disable interrupt */
@@ -2376,13 +2395,14 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        /* call read registers - updates values, now write them to struct */
        i40e_read_stats_registers(pf, hw);
 
-       stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
-                       pf->main_vsi->eth_stats.rx_multicast +
-                       pf->main_vsi->eth_stats.rx_broadcast -
+       stats->ipackets = ns->eth.rx_unicast +
+                       ns->eth.rx_multicast +
+                       ns->eth.rx_broadcast -
+                       ns->eth.rx_discards -
                        pf->main_vsi->eth_stats.rx_discards;
-       stats->opackets = pf->main_vsi->eth_stats.tx_unicast +
-                       pf->main_vsi->eth_stats.tx_multicast +
-                       pf->main_vsi->eth_stats.tx_broadcast;
+       stats->opackets = ns->eth.tx_unicast +
+                       ns->eth.tx_multicast +
+                       ns->eth.tx_broadcast;
        stats->ibytes   = ns->eth.rx_bytes;
        stats->obytes   = ns->eth.tx_bytes;
        stats->oerrors  = ns->eth.tx_errors +
@@ -2879,6 +2899,13 @@ i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 
        fc_conf->pause_time = pf->fc_conf.pause_time;
+
+       /* read out from register, in case they are modified by other port */
+       pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
+               I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
+       pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
+               I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
+
        fc_conf->high_water =  pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
        fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
 
@@ -8354,7 +8381,7 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
 
 /* For both X710 and XL710 */
 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1     0x10000200
-#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2     0x20000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2     0x203F0200
 #define I40E_GL_SWR_PRI_JOIN_MAP_0             0x26CE00
 
 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
index efd4fac..1686914 100644 (file)
@@ -68,7 +68,6 @@
 #include "i40e_ethdev.h"
 #include "i40e_pf.h"
 #define I40EVF_VSI_DEFAULT_MSIX_INTR     1
-#define I40EVF_VSI_DEFAULT_MSIX_INTR_LNX 0
 
 /* busy wait delay in msec */
 #define I40EVF_BUSY_WAIT_DELAY 10
@@ -723,11 +722,12 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
        uint32_t vector_id;
        int i, err;
 
-       if (rte_intr_allow_others(intr_handle)) {
+       if (dev->data->dev_conf.intr_conf.rxq != 0 &&
+           rte_intr_allow_others(intr_handle)) {
                if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
                        vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
                else
-                       vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR_LNX;
+                       vector_id = I40E_RX_VEC_START;
        } else {
                vector_id = I40E_MISC_VEC_ID;
        }
@@ -859,7 +859,7 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
        int err;
        struct vf_cmd_info args;
 
-       if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
+       if (is_zero_ether_addr(addr)) {
                PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
                            addr->addr_bytes[0], addr->addr_bytes[1],
                            addr->addr_bytes[2], addr->addr_bytes[3],
@@ -952,16 +952,74 @@ i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
        return 0;
 }
 
+static void
+i40evf_stat_update_48(uint64_t *offset,
+                  uint64_t *stat)
+{
+       if (*stat >= *offset)
+               *stat = *stat - *offset;
+       else
+               *stat = (uint64_t)((*stat +
+                       ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
+
+       *stat &= I40E_48_BIT_MASK;
+}
+
+static void
+i40evf_stat_update_32(uint64_t *offset,
+                  uint64_t *stat)
+{
+       if (*stat >= *offset)
+               *stat = (uint64_t)(*stat - *offset);
+       else
+               *stat = (uint64_t)((*stat +
+                       ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
+}
+
+static void
+i40evf_update_vsi_stats(struct i40e_vsi *vsi,
+                                       struct i40e_eth_stats *nes)
+{
+       struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
+
+       i40evf_stat_update_48(&oes->rx_bytes,
+                           &nes->rx_bytes);
+       i40evf_stat_update_48(&oes->rx_unicast,
+                           &nes->rx_unicast);
+       i40evf_stat_update_48(&oes->rx_multicast,
+                           &nes->rx_multicast);
+       i40evf_stat_update_48(&oes->rx_broadcast,
+                           &nes->rx_broadcast);
+       i40evf_stat_update_32(&oes->rx_discards,
+                               &nes->rx_discards);
+       i40evf_stat_update_32(&oes->rx_unknown_protocol,
+                           &nes->rx_unknown_protocol);
+       i40evf_stat_update_48(&oes->tx_bytes,
+                           &nes->tx_bytes);
+       i40evf_stat_update_48(&oes->tx_unicast,
+                           &nes->tx_unicast);
+       i40evf_stat_update_48(&oes->tx_multicast,
+                           &nes->tx_multicast);
+       i40evf_stat_update_48(&oes->tx_broadcast,
+                           &nes->tx_broadcast);
+       i40evf_stat_update_32(&oes->tx_errors, &nes->tx_errors);
+       i40evf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
+}
+
 static int
 i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
        int ret;
        struct i40e_eth_stats *pstats = NULL;
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vsi *vsi = &vf->vsi;
 
        ret = i40evf_update_stats(dev, &pstats);
        if (ret != 0)
                return 0;
 
+       i40evf_update_vsi_stats(vsi, pstats);
+
        stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
                                                pstats->rx_broadcast;
        stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
@@ -984,7 +1042,7 @@ i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
        i40evf_update_stats(dev, &pstats);
 
        /* set stats offset base on current values */
-       vf->vsi.eth_stats_offset = vf->vsi.eth_stats;
+       vf->vsi.eth_stats_offset = *pstats;
 }
 
 static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
@@ -1008,6 +1066,8 @@ static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
        int ret;
        unsigned i;
        struct i40e_eth_stats *pstats = NULL;
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vsi *vsi = &vf->vsi;
 
        if (n < I40EVF_NB_XSTATS)
                return I40EVF_NB_XSTATS;
@@ -1019,6 +1079,8 @@ static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
        if (!xstats)
                return 0;
 
+       i40evf_update_vsi_stats(vsi, pstats);
+
        /* loop over xstats array and values from pstats */
        for (i = 0; i < I40EVF_NB_XSTATS; i++) {
                xstats[i].id = i;
@@ -1210,29 +1272,29 @@ i40evf_init_vf(struct rte_eth_dev *dev)
        /* VF reset, shutdown admin queue and initialize again */
        if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed");
-               return -1;
+               goto err;
        }
 
        i40e_init_adminq_parameter(hw);
        if (i40e_init_adminq(hw) != I40E_SUCCESS) {
                PMD_INIT_LOG(ERR, "init_adminq failed");
-               return -1;
+               goto err;
        }
        vf->aq_resp = rte_zmalloc("vf_aq_resp", I40E_AQ_BUF_SZ, 0);
        if (!vf->aq_resp) {
                PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
-                       goto err_aq;
+               goto err_aq;
        }
        if (i40evf_check_api_version(dev) != 0) {
                PMD_INIT_LOG(ERR, "check_api version failed");
-               goto err_aq;
+               goto err_api;
        }
        bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
                (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
        vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
        if (!vf->vf_res) {
                PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
-                       goto err_aq;
+               goto err_api;
        }
 
        if (i40evf_get_vf_resource(dev) != 0) {
@@ -1279,6 +1341,9 @@ i40evf_init_vf(struct rte_eth_dev *dev)
 
 err_alloc:
        rte_free(vf->vf_res);
+       vf->vsi_res = NULL;
+err_api:
+       rte_free(vf->aq_resp);
 err_aq:
        i40e_shutdown_adminq(hw); /* ignore error */
 err:
@@ -1439,7 +1504,6 @@ i40evf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
 
 done:
        i40evf_enable_irq0(hw);
-       rte_intr_enable(&dev->pci_dev->intr_handle);
 }
 
 static int
@@ -2090,7 +2154,20 @@ i40evf_dev_start(struct rte_eth_dev *dev)
                goto err_mac;
        }
 
+       /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
+        * is mapped to VFIO vector 0 in i40evf_dev_init( ).
+        * If previous VFIO interrupt mapping set in i40evf_dev_init( ) is
+        * not cleared, it will fail when rte_intr_enable( ) tries to map Rx
+        * queue interrupt to other VFIO vectors.
+        * So clear uio/vfio intr/eventfd first to avoid failure.
+        */
+       if (dev->data->dev_conf.intr_conf.rxq != 0) {
+               rte_intr_disable(intr_handle);
+               rte_intr_enable(intr_handle);
+       }
+
        i40evf_enable_queues_intr(dev);
+
        return 0;
 
 err_mac:
@@ -2160,6 +2237,8 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
        new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        new_link.link_status = vf->link_up ? ETH_LINK_UP :
                                             ETH_LINK_DOWN;
+       new_link.link_autoneg =
+               dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED;
 
        i40evf_dev_atomic_write_link_status(dev, &new_link);
 
index b36d901..c5e06ca 100644 (file)
@@ -902,7 +902,10 @@ send_msg:
 static void
 i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
 {
+       struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
        struct i40e_virtchnl_pf_event event;
+       uint16_t vf_id = vf->vf_idx;
+       uint32_t tval, rval;
 
        event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
        event.event_data.link_event.link_status =
@@ -934,8 +937,15 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
                break;
        }
 
-       i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT,
-               I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
+       tval = I40E_READ_REG(hw, I40E_VF_ATQLEN(vf_id));
+       rval = I40E_READ_REG(hw, I40E_VF_ARQLEN(vf_id));
+
+       if (tval & I40E_VF_ATQLEN_ATQLEN_MASK ||
+           tval & I40E_VF_ATQLEN_ATQENABLE_MASK ||
+           rval & I40E_VF_ARQLEN_ARQLEN_MASK ||
+           rval & I40E_VF_ARQLEN_ARQENABLE_MASK)
+               i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT,
+                       I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
 }
 
 void
index ba33b2a..86546ca 100644 (file)
@@ -1646,36 +1646,42 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
                        const struct rte_eth_rxconf *rx_conf,
                        struct rte_mempool *mp)
 {
-       struct i40e_vsi *vsi;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_adapter *ad =
                I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct i40e_vsi *vsi;
+       struct i40e_pf *pf = NULL;
+       struct i40e_vf *vf = NULL;
        struct i40e_rx_queue *rxq;
        const struct rte_memzone *rz;
        uint32_t ring_size;
        uint16_t len, i;
-       uint16_t base, bsf, tc_mapping;
-       int use_def_burst_func = 1;
+       uint16_t reg_idx, base, bsf, tc_mapping;
+       int q_offset, use_def_burst_func = 1;
 
        if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
-               struct i40e_vf *vf =
-                       I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+               vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
                vsi = &vf->vsi;
-       } else
+               if (!vsi)
+                       return -EINVAL;
+               reg_idx = queue_idx;
+       } else {
+               pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
                vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
-
-       if (vsi == NULL) {
-               PMD_DRV_LOG(ERR, "VSI not available or queue "
-                           "index exceeds the maximum");
-               return I40E_ERR_PARAM;
+               if (!vsi)
+                       return -EINVAL;
+               q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
+               if (q_offset < 0)
+                       return -EINVAL;
+               reg_idx = vsi->base_queue + q_offset;
        }
+
        if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
-                       (nb_desc > I40E_MAX_RING_DESC) ||
-                       (nb_desc < I40E_MIN_RING_DESC)) {
+           (nb_desc > I40E_MAX_RING_DESC) ||
+           (nb_desc < I40E_MIN_RING_DESC)) {
                PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
                            "invalid", nb_desc);
-               return I40E_ERR_PARAM;
+               return -EINVAL;
        }
 
        /* Free memory if needed */
@@ -1698,12 +1704,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->nb_rx_desc = nb_desc;
        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
        rxq->queue_id = queue_idx;
-       if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF)
-               rxq->reg_idx = queue_idx;
-       else /* PF device */
-               rxq->reg_idx = vsi->base_queue +
-                       i40e_get_queue_offset_by_qindex(pf, queue_idx);
-
+       rxq->reg_idx = reg_idx;
        rxq->port_id = dev->data->port_id;
        rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
                                                        0 : ETHER_CRC_LEN);
@@ -1862,34 +1863,40 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
                        unsigned int socket_id,
                        const struct rte_eth_txconf *tx_conf)
 {
-       struct i40e_vsi *vsi;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_vsi *vsi;
+       struct i40e_pf *pf = NULL;
+       struct i40e_vf *vf = NULL;
        struct i40e_tx_queue *txq;
        const struct rte_memzone *tz;
        uint32_t ring_size;
        uint16_t tx_rs_thresh, tx_free_thresh;
-       uint16_t i, base, bsf, tc_mapping;
+       uint16_t reg_idx, i, base, bsf, tc_mapping;
+       int q_offset;
 
        if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
-               struct i40e_vf *vf =
-                       I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+               vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
                vsi = &vf->vsi;
-       } else
+               if (!vsi)
+                       return -EINVAL;
+               reg_idx = queue_idx;
+       } else {
+               pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
                vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
-
-       if (vsi == NULL) {
-               PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) "
-                           "exceeds the maximum", queue_idx);
-               return I40E_ERR_PARAM;
+               if (!vsi)
+                       return -EINVAL;
+               q_offset = i40e_get_queue_offset_by_qindex(pf, queue_idx);
+               if (q_offset < 0)
+                       return -EINVAL;
+               reg_idx = vsi->base_queue + q_offset;
        }
 
        if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
-                       (nb_desc > I40E_MAX_RING_DESC) ||
-                       (nb_desc < I40E_MIN_RING_DESC)) {
+           (nb_desc > I40E_MAX_RING_DESC) ||
+           (nb_desc < I40E_MIN_RING_DESC)) {
                PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
                            "invalid", nb_desc);
-               return I40E_ERR_PARAM;
+               return -EINVAL;
        }
 
        /**
@@ -1998,12 +2005,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
        txq->hthresh = tx_conf->tx_thresh.hthresh;
        txq->wthresh = tx_conf->tx_thresh.wthresh;
        txq->queue_id = queue_idx;
-       if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF)
-               txq->reg_idx = queue_idx;
-       else /* PF device */
-               txq->reg_idx = vsi->base_queue +
-                       i40e_get_queue_offset_by_qindex(pf, queue_idx);
-
+       txq->reg_idx = reg_idx;
        txq->port_id = dev->data->port_id;
        txq->txq_flags = tx_conf->txq_flags;
        txq->vsi = vsi;
@@ -2157,18 +2159,40 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
 void
 i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
 {
+       struct rte_eth_dev *dev;
        uint16_t i;
 
+       dev = &rte_eth_devices[txq->port_id];
+
        if (!txq || !txq->sw_ring) {
                PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
                return;
        }
 
-       for (i = 0; i < txq->nb_tx_desc; i++) {
-               if (txq->sw_ring[i].mbuf) {
+       /**
+        *  vPMD tx will not set sw_ring's mbuf to NULL after free,
+        *  so need to free remains more carefully.
+        */
+       if (dev->tx_pkt_burst == i40e_xmit_pkts_vec) {
+               i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
+               if (txq->tx_tail < i) {
+                       for (; i < txq->nb_tx_desc; i++) {
+                               rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+                               txq->sw_ring[i].mbuf = NULL;
+                       }
+                       i = 0;
+               }
+               for (; i < txq->tx_tail; i++) {
                        rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
                        txq->sw_ring[i].mbuf = NULL;
                }
+       } else {
+               for (i = 0; i < txq->nb_tx_desc; i++) {
+                       if (txq->sw_ring[i].mbuf) {
+                               rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+                               txq->sw_ring[i].mbuf = NULL;
+                       }
+               }
        }
 }
 
index d235daa..557593a 100644 (file)
@@ -192,8 +192,7 @@ desc_to_olflags_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
 #endif
 
 #define PKTLEN_SHIFT     10
-
-#define I40E_VPMD_DESC_DD_MASK 0x0001000100010001ULL
+#define I40E_UINT16_BIT (CHAR_BIT * sizeof(uint16_t))
 
 static inline void
 desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
@@ -224,7 +223,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
        struct i40e_rx_entry *sw_ring;
        uint16_t nb_pkts_recd;
        int pos;
-       uint64_t var;
 
        /* mask to shuffle from desc. to mbuf */
        uint8x16_t shuf_msk = {
@@ -357,7 +355,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                /* C.2 get 4 pkts staterr value  */
                staterr = vzipq_u16(sterr_tmp1.val[1],
                                    sterr_tmp2.val[1]).val[0];
-               stat = vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);
 
                desc_to_olflags_v(descs, &rx_pkts[pos]);
 
@@ -422,6 +419,12 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                        rx_pkts[pos + 3]->next = NULL;
                }
 
+               staterr = vshlq_n_u16(staterr, I40E_UINT16_BIT - 1);
+               staterr = vreinterpretq_u16_s16(
+                               vshrq_n_s16(vreinterpretq_s16_u16(staterr),
+                                           I40E_UINT16_BIT - 1));
+               stat = ~vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);
+
                rte_prefetch_non_temporal(rxdp + RTE_I40E_DESCS_PER_LOOP);
 
                /* D.3 copy final 1,2 data to rx_pkts */
@@ -431,10 +434,12 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                         pkt_mb1);
                desc_to_ptype_v(descs, &rx_pkts[pos]);
                /* C.4 calc avaialbe number of desc */
-               var = __builtin_popcountll(stat & I40E_VPMD_DESC_DD_MASK);
-               nb_pkts_recd += var;
-               if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+               if (unlikely(stat == 0)) {
+                       nb_pkts_recd += RTE_I40E_DESCS_PER_LOOP;
+               } else {
+                       nb_pkts_recd += __builtin_ctzl(stat) / I40E_UINT16_BIT;
                        break;
+               }
        }
 
        /* Update our internal tail pointer */
index f994fed..73996bb 100644 (file)
@@ -3230,6 +3230,7 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
        link.link_status = ETH_LINK_DOWN;
        link.link_speed = 0;
        link.link_duplex = ETH_LINK_HALF_DUPLEX;
+       link.link_autoneg = ETH_LINK_AUTONEG;
        memset(&old, 0, sizeof(old));
        rte_ixgbe_dev_atomic_read_link_status(dev, &old);
 
@@ -4250,6 +4251,15 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
        }
        ixgbevf_configure_msix(dev);
 
+       /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
+        * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ).
+        * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( )
+        * is not cleared, it will fail when following rte_intr_enable( ) tries
+        * to map Rx queue interrupt to other VFIO vectors.
+        * So clear uio/vfio intr/eventfd first to avoid failure.
+        */
+       rte_intr_disable(intr_handle);
+
        rte_intr_enable(intr_handle);
 
        /* Re-enable interrupt for VF */
@@ -5013,13 +5023,13 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
        IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
 
        /* write pool mirrror control  register */
-       if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
+       if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
                IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
                IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
                                mp_msb);
        }
        /* write VLAN mirrror control  register */
-       if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
+       if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
                IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
                IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
                                mv_msb);
@@ -6778,6 +6788,8 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        struct ixgbe_dcb_config *dcb_config =
                        IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
        struct ixgbe_dcb_tc_config *tc;
+       struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+       uint8_t nb_tcs;
        uint8_t i, j;
 
        if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
@@ -6785,19 +6797,31 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        else
                dcb_info->nb_tcs = 1;
 
+       tc_queue = &dcb_info->tc_queue;
+       nb_tcs = dcb_info->nb_tcs;
+
        if (dcb_config->vt_mode) { /* vt is enabled*/
                struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
                                &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
                for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
                        dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
-               for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
-                       for (j = 0; j < dcb_info->nb_tcs; j++) {
-                               dcb_info->tc_queue.tc_rxq[i][j].base =
-                                               i * dcb_info->nb_tcs + j;
-                               dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
-                               dcb_info->tc_queue.tc_txq[i][j].base =
-                                               i * dcb_info->nb_tcs + j;
-                               dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
+               if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+                       for (j = 0; j < nb_tcs; j++) {
+                               tc_queue->tc_rxq[0][j].base = j;
+                               tc_queue->tc_rxq[0][j].nb_queue = 1;
+                               tc_queue->tc_txq[0][j].base = j;
+                               tc_queue->tc_txq[0][j].nb_queue = 1;
+                       }
+               } else {
+                       for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+                               for (j = 0; j < nb_tcs; j++) {
+                                       tc_queue->tc_rxq[i][j].base =
+                                               i * nb_tcs + j;
+                                       tc_queue->tc_rxq[i][j].nb_queue = 1;
+                                       tc_queue->tc_txq[i][j].base =
+                                               i * nb_tcs + j;
+                                       tc_queue->tc_txq[i][j].nb_queue = 1;
+                               }
                        }
                }
        } else { /* vt is disabled*/
index 09440cc..a760b1b 100644 (file)
@@ -263,7 +263,7 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 
        gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
        gpie &= ~IXGBE_GPIE_VTMODE_MASK;
-       gpie |= IXGBE_GPIE_MSIX_MODE;
+       gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT;
 
        switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
        case ETH_64_POOLS:
index d1e300a..8b18b53 100644 (file)
@@ -3395,12 +3395,19 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
                dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
                dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
        }
+
+       /* Initialize User Priority to Traffic Class mapping */
+       for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+               tc = &dcb_config->tc_config[j];
+               tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
+       }
+
        /* User Priority to Traffic Class mapping */
        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
                j = vmdq_rx_conf->dcb_tc[i];
                tc = &dcb_config->tc_config[j];
-               tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
-                                               (uint8_t)(1 << j);
+               tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
+                                               (uint8_t)(1 << i);
        }
 }
 
@@ -3422,12 +3429,18 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
                dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
        }
 
+       /* Initialize User Priority to Traffic Class mapping */
+       for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+               tc = &dcb_config->tc_config[j];
+               tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
+       }
+
        /* User Priority to Traffic Class mapping */
        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
                j = vmdq_tx_conf->dcb_tc[i];
                tc = &dcb_config->tc_config[j];
-               tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
-                                               (uint8_t)(1 << j);
+               tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
+                                               (uint8_t)(1 << i);
        }
 }
 
@@ -3443,12 +3456,18 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
        dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
        dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
 
+       /* Initialize User Priority to Traffic Class mapping */
+       for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+               tc = &dcb_config->tc_config[j];
+               tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
+       }
+
        /* User Priority to Traffic Class mapping */
        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
                j = rx_conf->dcb_tc[i];
                tc = &dcb_config->tc_config[j];
-               tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
-                                               (uint8_t)(1 << j);
+               tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
+                                               (uint8_t)(1 << i);
        }
 }
 
@@ -3464,12 +3483,18 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
        dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
        dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
 
+       /* Initialize User Priority to Traffic Class mapping */
+       for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+               tc = &dcb_config->tc_config[j];
+               tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
+       }
+
        /* User Priority to Traffic Class mapping */
        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
                j = tx_conf->dcb_tc[i];
                tc = &dcb_config->tc_config[j];
-               tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
-                                               (uint8_t)(1 << j);
+               tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
+                                               (uint8_t)(1 << i);
        }
 }
 
index aa9d2dc..86d1e44 100644 (file)
@@ -463,8 +463,10 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                DEBUG("using port %u (%08" PRIx32 ")", port, test);
 
                ctx = ibv_open_device(ibv_dev);
-               if (ctx == NULL)
+               if (ctx == NULL) {
+                       err = ENODEV;
                        goto port_error;
+               }
 
                /* Check port status. */
                err = ibv_query_port(ctx, port, &port_attr);
@@ -476,6 +478,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
                        ERROR("port %d is not configured in Ethernet mode",
                              port);
+                       err = EINVAL;
                        goto port_error;
                }
 
@@ -519,6 +522,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                }
                if (ibv_exp_query_device(ctx, &exp_device_attr)) {
                        ERROR("ibv_exp_query_device() failed");
+                       err = ENODEV;
                        goto port_error;
                }
 
@@ -581,6 +585,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                if (priv_get_mac(priv, &mac.addr_bytes)) {
                        ERROR("cannot get MAC address, is mlx5_en loaded?"
                              " (errno: %s)", strerror(errno));
+                       err = ENODEV;
                        goto port_error;
                }
                INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
index ca981a5..2ea995f 100644 (file)
@@ -49,6 +49,7 @@
 #include <linux/sockios.h>
 #include <linux/version.h>
 #include <fcntl.h>
+#include <stdalign.h>
 
 /* DPDK headers don't like -pedantic. */
 #ifdef PEDANTIC
@@ -119,7 +120,6 @@ struct ethtool_link_settings {
 #define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
 #define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
 #endif
-#define ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32 (SCHAR_MAX)
 
 /**
  * Return private structure associated with an Ethernet device.
@@ -755,12 +755,7 @@ static int
 mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
 {
        struct priv *priv = mlx5_get_priv(dev);
-       __extension__ struct {
-               struct ethtool_link_settings edata;
-               uint32_t link_mode_data[3 *
-                                       ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32];
-       } ecmd;
-
+       struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
        struct ifreq ifr;
        struct rte_eth_link dev_link;
        uint64_t sc;
@@ -773,59 +768,65 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
        memset(&dev_link, 0, sizeof(dev_link));
        dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
                                (ifr.ifr_flags & IFF_RUNNING));
-       memset(&ecmd, 0, sizeof(ecmd));
-       ecmd.edata.cmd = ETHTOOL_GLINKSETTINGS;
-       ifr.ifr_data = (void *)&ecmd;
+       ifr.ifr_data = (void *)&gcmd;
        if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
                DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
                      strerror(errno));
                return -1;
        }
-       ecmd.edata.link_mode_masks_nwords = -ecmd.edata.link_mode_masks_nwords;
+       gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
+
+       alignas(struct ethtool_link_settings)
+       uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
+                    sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
+       struct ethtool_link_settings *ecmd = (void *)data;
+
+       *ecmd = gcmd;
+       ifr.ifr_data = (void *)ecmd;
        if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
                DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
                      strerror(errno));
                return -1;
        }
-       dev_link.link_speed = ecmd.edata.speed;
-       sc = ecmd.edata.link_mode_masks[0] |
-               ((uint64_t)ecmd.edata.link_mode_masks[1] << 32);
+       dev_link.link_speed = ecmd->speed;
+       sc = ecmd->link_mode_masks[0] |
+               ((uint64_t)ecmd->link_mode_masks[1] << 32);
        priv->link_speed_capa = 0;
-       if (sc & ETHTOOL_LINK_MODE_Autoneg_BIT)
+       if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT))
                priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
-       if (sc & (ETHTOOL_LINK_MODE_1000baseT_Full_BIT |
-                 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT))
+       if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
+                 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
                priv->link_speed_capa |= ETH_LINK_SPEED_1G;
-       if (sc & (ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT |
-                 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT |
-                 ETHTOOL_LINK_MODE_10000baseR_FEC_BIT))
+       if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
+                 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
+                 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
                priv->link_speed_capa |= ETH_LINK_SPEED_10G;
-       if (sc & (ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT |
-                 ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT))
+       if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
+                 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
                priv->link_speed_capa |= ETH_LINK_SPEED_20G;
-       if (sc & (ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT |
-                 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT |
-                 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT |
-                 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT))
+       if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
+                 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
+                 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
+                 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
                priv->link_speed_capa |= ETH_LINK_SPEED_40G;
-       if (sc & (ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT |
-                 ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT |
-                 ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT |
-                 ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT))
+       if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
+                 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
+                 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
+                 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
                priv->link_speed_capa |= ETH_LINK_SPEED_56G;
-       if (sc & (ETHTOOL_LINK_MODE_25000baseCR_Full_BIT |
-                 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT |
-                 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT))
+       if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
+                 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
+                 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
                priv->link_speed_capa |= ETH_LINK_SPEED_25G;
-       if (sc & (ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT |
-                 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT))
+       if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
+                 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
                priv->link_speed_capa |= ETH_LINK_SPEED_50G;
-       if (sc & (ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT |
-                 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT |
-                 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT |
-                 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT))
+       if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
+                 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
+                 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
+                 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
                priv->link_speed_capa |= ETH_LINK_SPEED_100G;
-       dev_link.link_duplex = ((ecmd.edata.duplex == DUPLEX_HALF) ?
+       dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
                                ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
        dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
                                  ETH_LINK_SPEED_FIXED);
index aea203b..5095a2b 100644 (file)
@@ -609,11 +609,9 @@ priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
 int
 priv_rehash_flows(struct priv *priv)
 {
-       enum hash_rxq_flow_type i;
+       size_t i;
 
-       for (i = HASH_RXQ_FLOW_TYPE_PROMISC;
-                       i != RTE_DIM((*priv->hash_rxqs)[0].special_flow);
-                       ++i)
+       for (i = 0; i != RTE_DIM((*priv->hash_rxqs)[0].special_flow); ++i)
                if (!priv_allow_flow_type(priv, i)) {
                        priv_special_flow_disable(priv, i);
                } else {
index 58926e3..92e4fd5 100644 (file)
@@ -431,8 +431,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 #ifdef MLX5_PMD_SOFT_COUNTERS
                total_length = length;
 #endif
-               if (length < (MLX5_WQE_DWORD_SIZE + 2))
+               if (length < (MLX5_WQE_DWORD_SIZE + 2)) {
+                       txq->stats.oerrors++;
                        break;
+               }
                /* Update element. */
                (*txq->elts)[elts_head] = buf;
                elts_head = (elts_head + 1) & (elts_n - 1);
@@ -735,8 +737,10 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                if (max < segs_n + 1)
                        break;
                /* Do not bother with large packets MPW cannot handle. */
-               if (segs_n > MLX5_MPW_DSEG_MAX)
+               if (segs_n > MLX5_MPW_DSEG_MAX) {
+                       txq->stats.oerrors++;
                        break;
+               }
                max -= segs_n;
                --pkts_n;
                /* Should we enable HW CKSUM offload */
@@ -941,8 +945,10 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                if (max < segs_n + 1)
                        break;
                /* Do not bother with large packets MPW cannot handle. */
-               if (segs_n > MLX5_MPW_DSEG_MAX)
+               if (segs_n > MLX5_MPW_DSEG_MAX) {
+                       txq->stats.oerrors++;
                        break;
+               }
                max -= segs_n;
                --pkts_n;
                /* Should we enable HW CKSUM offload */
index 909d80e..d3d6d6a 100644 (file)
@@ -81,7 +81,7 @@ struct mlx5_txq_stats {
        uint64_t opackets; /**< Total of successfully sent packets. */
        uint64_t obytes; /**< Total of successfully sent bytes. */
 #endif
-       uint64_t odropped; /**< Total of packets not sent when TX ring full. */
+       uint64_t oerrors; /**< Total number of failed transmitted packets. */
 };
 
 /* Flow director queue structure. */
index f2b5781..79b1841 100644 (file)
@@ -94,13 +94,13 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
                        tmp.q_opackets[idx] += txq->stats.opackets;
                        tmp.q_obytes[idx] += txq->stats.obytes;
 #endif
-                       tmp.q_errors[idx] += txq->stats.odropped;
+                       tmp.q_errors[idx] += txq->stats.oerrors;
                }
 #ifdef MLX5_PMD_SOFT_COUNTERS
                tmp.opackets += txq->stats.opackets;
                tmp.obytes += txq->stats.obytes;
 #endif
-               tmp.oerrors += txq->stats.odropped;
+               tmp.oerrors += txq->stats.oerrors;
        }
 #ifndef MLX5_PMD_SOFT_COUNTERS
        /* FIXME: retrieve and add hardware counters. */
index a824787..f126979 100644 (file)
@@ -35,6 +35,7 @@
 #define RTE_PMD_MLX5_UTILS_H_
 
 #include <stddef.h>
+#include <stdint.h>
 #include <stdio.h>
 #include <limits.h>
 #include <assert.h>
@@ -61,6 +62,9 @@
         !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
             ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))))
 
+/* Convert a bit number to the corresponding 64-bit mask */
+#define MLX5_BITSHIFT(v) (UINT64_C(1) << (v))
+
 /* Save and restore errno around argument evaluation. */
 #define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0]))
 
index 49c5293..7445320 100644 (file)
@@ -857,6 +857,8 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
        /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
 
+       memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
+
        /* reading per RX ring stats */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
@@ -2120,7 +2122,8 @@ nfp_net_reta_update(struct rte_eth_dev *dev,
                                reta &= ~(0xFF << (8 * j));
                        reta |= reta_conf[idx].reta[shift + j] << (8 * j);
                }
-               nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + shift, reta);
+               nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
+                             reta);
        }
 
        update = NFP_NET_CFG_UPDATE_RSS;
@@ -2167,7 +2170,8 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
                if (!mask)
                        continue;
 
-               reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + shift);
+               reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
+                                   shift);
                for (j = 0; j < 4; j++) {
                        if (!(mask & (0x1 << j)))
                                continue;
@@ -2217,6 +2221,9 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev,
                                NFP_NET_CFG_RSS_IPV6_TCP |
                                NFP_NET_CFG_RSS_IPV6_UDP;
 
+       cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
+       cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
+
        /* configuring where to apply the RSS hash */
        nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
 
index 1a208ff..f6b3c10 100644 (file)
@@ -411,11 +411,13 @@ open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
        /* The dumper is created using the previous pcap_t reference */
        *dumper = pcap_dump_open(tx_pcap, pcap_filename);
        if (*dumper == NULL) {
+               pcap_close(tx_pcap);
                RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n",
                        pcap_filename);
                return -1;
        }
 
+       pcap_close(tx_pcap);
        return 0;
 }
 
index 29b443d..18150b5 100644 (file)
@@ -66,8 +66,9 @@ CFLAGS_BASE_DRIVER += -Wno-sometimes-uninitialized
 ifeq ($(shell clang -Wno-pointer-bool-conversion -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0)
 CFLAGS_BASE_DRIVER += -Wno-pointer-bool-conversion
 endif
-else
-CFLAGS_BASE_DRIVER += -wd188 #188: enumerated type mixed with another type
+else #ICC
+CFLAGS_BASE_DRIVER += -wd188   #188: enumerated type mixed with another type
+CFLAGS_qede_ethdev.o += -wd279 #279: controlling expression is constant
 endif
 
 #
index 907b35b..e2da8aa 100644 (file)
@@ -624,45 +624,45 @@ struct ecore_dev {
        u16 device_id;
 
        u16                             chip_num;
-       #define CHIP_NUM_MASK                   0xffff
-       #define CHIP_NUM_SHIFT                  16
+#define CHIP_NUM_MASK                  0xffff
+#define CHIP_NUM_SHIFT                 0
 
-       u16                             chip_rev;
-       #define CHIP_REV_MASK                   0xf
-       #define CHIP_REV_SHIFT                  12
+       u8                            chip_rev;
+#define CHIP_REV_MASK                  0xf
+#define CHIP_REV_SHIFT                 0
 #ifndef ASIC_ONLY
-       #define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
-       #define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
-       #define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
-       #define CHIP_REV_IS_EMUL(_p_dev) (CHIP_REV_IS_EMUL_A0(_p_dev) || \
-                                         CHIP_REV_IS_EMUL_B0(_p_dev))
-       #define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
-       #define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
-       #define CHIP_REV_IS_FPGA(_p_dev) (CHIP_REV_IS_FPGA_A0(_p_dev) || \
-                                         CHIP_REV_IS_FPGA_B0(_p_dev))
-       #define CHIP_REV_IS_SLOW(_p_dev) \
-               (CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev))
-       #define CHIP_REV_IS_A0(_p_dev) \
-               (CHIP_REV_IS_EMUL_A0(_p_dev) || \
-                CHIP_REV_IS_FPGA_A0(_p_dev) || \
-                !(_p_dev)->chip_rev)
-       #define CHIP_REV_IS_B0(_p_dev) \
-               (CHIP_REV_IS_EMUL_B0(_p_dev) || \
-                CHIP_REV_IS_FPGA_B0(_p_dev) || \
-                (_p_dev)->chip_rev == 1)
-       #define CHIP_REV_IS_ASIC(_p_dev) !CHIP_REV_IS_SLOW(_p_dev)
+#define CHIP_REV_IS_TEDIBEAR(_p_dev)   ((_p_dev)->chip_rev == 0x5)
+#define CHIP_REV_IS_EMUL_A0(_p_dev)    ((_p_dev)->chip_rev == 0xe)
+#define CHIP_REV_IS_EMUL_B0(_p_dev)    ((_p_dev)->chip_rev == 0xc)
+#define CHIP_REV_IS_EMUL(_p_dev) \
+       (CHIP_REV_IS_EMUL_A0(_p_dev) || CHIP_REV_IS_EMUL_B0(_p_dev))
+#define CHIP_REV_IS_FPGA_A0(_p_dev)    ((_p_dev)->chip_rev == 0xf)
+#define CHIP_REV_IS_FPGA_B0(_p_dev)    ((_p_dev)->chip_rev == 0xd)
+#define CHIP_REV_IS_FPGA(_p_dev) \
+       (CHIP_REV_IS_FPGA_A0(_p_dev) || CHIP_REV_IS_FPGA_B0(_p_dev))
+#define CHIP_REV_IS_SLOW(_p_dev) \
+       (CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev))
+#define CHIP_REV_IS_A0(_p_dev) \
+       (CHIP_REV_IS_EMUL_A0(_p_dev) || CHIP_REV_IS_FPGA_A0(_p_dev) || \
+        (!(_p_dev)->chip_rev && !(_p_dev)->chip_metal))
+#define CHIP_REV_IS_B0(_p_dev) \
+       (CHIP_REV_IS_EMUL_B0(_p_dev) || CHIP_REV_IS_FPGA_B0(_p_dev) || \
+        ((_p_dev)->chip_rev == 1 && !(_p_dev)->chip_metal))
+#define CHIP_REV_IS_ASIC(_p_dev)       !CHIP_REV_IS_SLOW(_p_dev)
 #else
-       #define CHIP_REV_IS_A0(_p_dev)  (!(_p_dev)->chip_rev)
-       #define CHIP_REV_IS_B0(_p_dev)  ((_p_dev)->chip_rev == 1)
+#define CHIP_REV_IS_A0(_p_dev) \
+       (!(_p_dev)->chip_rev && !(_p_dev)->chip_metal)
+#define CHIP_REV_IS_B0(_p_dev) \
+       ((_p_dev)->chip_rev == 1 && !(_p_dev)->chip_metal)
 #endif
 
-       u16                             chip_metal;
-       #define CHIP_METAL_MASK                 0xff
-       #define CHIP_METAL_SHIFT                4
+       u8                            chip_metal;
+#define CHIP_METAL_MASK                        0xff
+#define CHIP_METAL_SHIFT               0
 
-       u16                             chip_bond_id;
-       #define CHIP_BOND_ID_MASK               0xf
-       #define CHIP_BOND_ID_SHIFT              0
+       u8                            chip_bond_id;
+#define CHIP_BOND_ID_MASK              0xff
+#define CHIP_BOND_ID_SHIFT             0
 
        u8                              num_engines;
        u8                              num_ports_in_engines;
@@ -670,12 +670,12 @@ struct ecore_dev {
 
        u8                              path_id;
        enum ecore_mf_mode              mf_mode;
-       #define IS_MF_DEFAULT(_p_hwfn)  \
-                       (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
-       #define IS_MF_SI(_p_hwfn)       \
-                       (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
-       #define IS_MF_SD(_p_hwfn)       \
-                       (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
+#define IS_MF_DEFAULT(_p_hwfn) \
+       (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
+#define IS_MF_SI(_p_hwfn)      \
+       (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
+#define IS_MF_SD(_p_hwfn)      \
+       (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
 
        int                             pcie_width;
        int                             pcie_speed;
@@ -804,6 +804,7 @@ static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev,
 
 int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
 void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
+                                          struct ecore_ptt *p_ptt,
                                           u32 min_pf_rate);
 
 int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw);
index 3dd953d..5e2f029 100644 (file)
@@ -67,6 +67,7 @@ union type0_task_context {
 
 /* TYPE-1 task context - ROCE */
 union type1_task_context {
+       struct regpair reserved; /* @DPDK */
 };
 
 struct src_ent {
index 8175619..8aa3c0b 100644 (file)
@@ -437,7 +437,7 @@ ecore_dcbx_get_app_data(struct ecore_hwfn *p_hwfn,
        p_params->app_error = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
        p_params->num_app_entries = ECORE_MFW_GET_FIELD(p_app->flags,
                                                        DCBX_APP_NUM_ENTRIES);
-       for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+       for (i = 0; i < p_params->num_app_entries; i++) {
                entry = &p_params->app_entry[i];
                if (ieee) {
                        u8 sf_ieee;
@@ -619,7 +619,7 @@ ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
        return ECORE_SUCCESS;
 }
 
-static enum _ecore_status_t
+static void
 ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
                                  struct ecore_ptt *p_ptt,
                                  struct ecore_dcbx_get *params)
@@ -644,7 +644,7 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
        if (!enabled) {
                p_operational->enabled = enabled;
                p_operational->valid = false;
-               return ECORE_INVAL;
+               return;
        }
 
        p_data = &p_operational->params;
@@ -671,8 +671,6 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
        p_operational->err = err;
        p_operational->enabled = enabled;
        p_operational->valid = true;
-
-       return rc;
 }
 
 static enum _ecore_status_t
@@ -1145,7 +1143,7 @@ ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
        p_app->flags |= (u32)p_params->num_app_entries <<
                                        DCBX_APP_NUM_ENTRIES_SHIFT;
 
-       for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+       for (i = 0; i < p_params->num_app_entries; i++) {
                entry = &p_app->app_pri_tbl[i].entry;
                if (ieee) {
                        *entry &= ~DCBX_APP_SF_IEEE_MASK;
@@ -1340,7 +1338,7 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
        p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
        OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set.config.params,
                    &dcbx_info->operational.params,
-                   sizeof(struct ecore_dcbx_admin_params));
+                   sizeof(p_hwfn->p_dcbx_info->set.config.params));
        p_hwfn->p_dcbx_info->set.config.valid = true;
 
        OSAL_MEMCPY(params, &p_hwfn->p_dcbx_info->set,
index 6060f9e..d2dc044 100644 (file)
@@ -2856,12 +2856,10 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
        else
                p_dev->type = ECORE_DEV_TYPE_BB;
 
-       p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
-                                        MISCS_REG_CHIP_NUM);
-       p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
-                                        MISCS_REG_CHIP_REV);
-
-       MASK_FIELD(CHIP_REV, p_dev->chip_rev);
+       tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_NUM);
+       p_dev->chip_num = (u16)GET_FIELD(tmp, CHIP_NUM);
+       tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_REV);
+       p_dev->chip_rev = (u8)GET_FIELD(tmp, CHIP_REV);
 
        /* Learn number of HW-functions */
        tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
@@ -2885,20 +2883,19 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
        }
 #endif
 
-       p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
-                                      MISCS_REG_CHIP_TEST_REG) >> 4;
-       MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id);
-       p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
-                                          MISCS_REG_CHIP_METAL);
-       MASK_FIELD(CHIP_METAL, p_dev->chip_metal);
+       tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_TEST_REG);
+       p_dev->chip_bond_id = (u8)GET_FIELD(tmp, CHIP_BOND_ID);
+       tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_METAL);
+       p_dev->chip_metal = (u8)GET_FIELD(tmp, CHIP_METAL);
+
        DP_INFO(p_dev->hwfns,
-               "Chip details - %s%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+               "Chip details - %s%d, Num: %04x Rev: %02x Bond id: %02x Metal: %02x\n",
                ECORE_IS_BB(p_dev) ? "BB" : "AH",
                CHIP_REV_IS_A0(p_dev) ? 0 : 1,
                p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
                p_dev->chip_metal);
 
-       if (ECORE_IS_BB(p_dev) && CHIP_REV_IS_A0(p_dev)) {
+       if (ECORE_IS_BB_A0(p_dev)) {
                DP_NOTICE(p_dev->hwfns, false,
                          "The chip type/rev (BB A0) is not supported!\n");
                return ECORE_ABORTED;
@@ -4111,6 +4108,7 @@ int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
 
 /* API to configure WFQ from mcp link change */
 void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
+                                          struct ecore_ptt *p_ptt,
                                           u32 min_pf_rate)
 {
        int i;
@@ -4125,8 +4123,7 @@ void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
 
-               __ecore_configure_vp_wfq_on_link_change(p_hwfn,
-                                                       p_hwfn->p_dpc_ptt,
+               __ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
                                                        min_pf_rate);
        }
 }
index 7f4db0a..3db5cc3 100644 (file)
@@ -133,7 +133,7 @@ void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
        OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
 }
 
-u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+static u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
 {
        /* The HW is using DWORDS and we need to translate it to Bytes */
        return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
index 0750b2e..c246f18 100644 (file)
@@ -97,17 +97,6 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn);
  */
 void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn);
 
-/**
- * @brief ecore_ptt_get_hw_addr - Get PTT's GRC/HW address
- *
- * @param p_hwfn
- * @param p_ptt
- *
- * @return u32
- */
-u32 ecore_ptt_get_hw_addr(struct ecore_hwfn    *p_hwfn,
-                         struct ecore_ptt      *p_ptt);
-
 /**
  * @brief ecore_ptt_get_bar_addr - Get PPT's external BAR address
  *
index 2ff9715..fad0746 100644 (file)
@@ -746,7 +746,7 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
        /* Mintz bandwidth configuration */
        __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
                                           p_link, min_bw);
-       ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
+       ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
                                              p_link->min_pf_rate);
 
        p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
index b28d728..064d7e7 100644 (file)
@@ -1214,13 +1214,17 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
                             (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
                             &params);
 
-       ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
-                            mbx->req_virt->first_tlv.reply_address,
-                            sizeof(u64) / 4, &params);
-
+       /* Once PF copies the rc to the VF, the latter can continue and
+        * and send an additional message. So we have to make sure the
+        * channel would be re-set to ready prior to that.
+        */
        REG_WR(p_hwfn,
               GTT_BAR0_MAP_REG_USDM_RAM +
               USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+
+       ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+                            mbx->req_virt->first_tlv.reply_address,
+                            sizeof(u64) / 4, &params);
 }
 
 static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
@@ -2806,12 +2810,13 @@ static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
                goto out;
        }
 
-       /* Update shadow copy of the VF configuration */
+       /* Update shadow copy of the VF configuration. In case shadow indicates
+        * the action should be blocked return success to VF to imitate the
+        * firmware behaviour in such case.
+        */
        if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
-           ECORE_SUCCESS) {
-               status = PFVF_STATUS_FAILURE;
+           ECORE_SUCCESS)
                goto out;
-       }
 
        /* Determine if the unicast filtering is acceptible by PF */
        if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
index be8b1ec..5ff8f28 100644 (file)
@@ -325,7 +325,7 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
 
        /* get HW info */
        p_hwfn->p_dev->type = resp->pfdev_info.dev_type;
-       p_hwfn->p_dev->chip_rev = resp->pfdev_info.chip_rev;
+       p_hwfn->p_dev->chip_rev = (u8)resp->pfdev_info.chip_rev;
 
        DP_INFO(p_hwfn, "Chip details - %s%d\n",
                ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH",
index 67ebb1e..8592485 100644 (file)
@@ -1523,6 +1523,19 @@ virtio_dev_start(struct rte_eth_dev *dev)
        struct virtnet_rx *rxvq;
        struct virtnet_tx *txvq __rte_unused;
        struct virtio_hw *hw = dev->data->dev_private;
+       int ret;
+
+       /* Finish the initialization of the queues */
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               ret = virtio_dev_rx_queue_setup_finish(dev, i);
+               if (ret < 0)
+                       return ret;
+       }
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               ret = virtio_dev_tx_queue_setup_finish(dev, i);
+               if (ret < 0)
+                       return ret;
+       }
 
        /* check if lsc interrupt feature is enabled */
        if (dev->data->dev_conf.intr_conf.lsc) {
@@ -1551,6 +1564,8 @@ virtio_dev_start(struct rte_eth_dev *dev)
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxvq = dev->data->rx_queues[i];
+               /* Flush the old packets */
+               virtqueue_flush(rxvq->vq);
                virtqueue_notify(rxvq->vq);
        }
 
index 4feccf9..c491ec1 100644 (file)
@@ -88,10 +88,16 @@ int  virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool);
 
+int virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev,
+                               uint16_t rx_queue_id);
+
 int  virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc, unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf);
 
+int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
+                               uint16_t tx_queue_id);
+
 uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);
 
index a33ef1a..43fac63 100644 (file)
@@ -72,7 +72,7 @@
 #define VIRTIO_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
        ETH_TXQ_FLAGS_NOOFFLOADS)
 
-static void
+void
 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
 {
        struct vring_desc *dp, *dp_tail;
@@ -291,6 +291,10 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                /* prepend cannot fail, checked by caller */
                hdr = (struct virtio_net_hdr *)
                        rte_pktmbuf_prepend(cookie, head_size);
+               /* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
+                * which is wrong. Below subtract restores correct pkt size.
+                */
+               cookie->pkt_len -= head_size;
                /* if offload disabled, it is not zeroed below, do it now */
                if (offload == 0) {
                        ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
@@ -412,9 +416,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
        struct virtnet_rx *rxvq;
-       int error, nbufs;
-       struct rte_mbuf *m;
-       uint16_t desc_idx;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -431,10 +432,24 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
        }
        dev->data->rx_queues[queue_idx] = rxvq;
 
+       return 0;
+}
+
+int
+virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+       uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
+       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+       struct virtnet_rx *rxvq = &vq->rxq;
+       struct rte_mbuf *m;
+       uint16_t desc_idx;
+       int error, nbufs;
+
+       PMD_INIT_FUNC_TRACE();
 
        /* Allocate blank mbufs for the each rx descriptor */
        nbufs = 0;
-       error = ENOSPC;
 
        if (hw->use_simple_rxtx) {
                for (desc_idx = 0; desc_idx < vq->vq_nentries;
@@ -525,7 +540,6 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
        struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
        struct virtnet_tx *txvq;
        uint16_t tx_free_thresh;
-       uint16_t desc_idx;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -554,9 +568,24 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        vq->vq_free_thresh = tx_free_thresh;
 
-       if (hw->use_simple_rxtx) {
-               uint16_t mid_idx  = vq->vq_nentries >> 1;
+       dev->data->tx_queues[queue_idx] = txvq;
+       return 0;
+}
+
+int
+virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
+                               uint16_t queue_idx)
+{
+       uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
+       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+       uint16_t mid_idx = vq->vq_nentries >> 1;
+       struct virtnet_tx *txvq = &vq->txq;
+       uint16_t desc_idx;
 
+       PMD_INIT_FUNC_TRACE();
+
+       if (hw->use_simple_rxtx) {
                for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
                        vq->vq_ring.avail->ring[desc_idx] =
                                desc_idx + mid_idx;
@@ -578,7 +607,6 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        VIRTQUEUE_DUMP(vq);
 
-       dev->data->tx_queues[queue_idx] = txvq;
        return 0;
 }
 
@@ -661,7 +689,7 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
                         * In case of SCTP, this will be wrong since it's a CRC
                         * but there's nothing we can do.
                         */
-                       uint16_t csum, off;
+                       uint16_t csum = 0, off;
 
                        rte_raw_cksum_mbuf(m, hdr->csum_start,
                                rte_pktmbuf_pkt_len(m) - hdr->csum_start,
index b651e53..a6c0b34 100644 (file)
@@ -65,6 +65,8 @@ virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
        struct vring_desc *start_dp;
        uint16_t desc_idx;
 
+       cookie->port = vq->rxq.port_id;
+
        desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
        dxp = &vq->vq_descx[desc_idx];
        dxp->cookie = (void *)cookie;
index 082e821..6f4845b 100644 (file)
@@ -97,6 +97,10 @@ vhost_user_read(int fd, struct vhost_user_msg *msg)
        }
 
        sz_payload = msg->size;
+
+       if ((size_t)sz_payload > sizeof(msg->payload))
+               goto fail;
+
        if (sz_payload) {
                ret = recv(fd, (void *)((char *)msg + sz_hdr), sz_payload, 0);
                if (ret < sz_payload) {
index 7f60e3e..4f8707a 100644 (file)
@@ -70,3 +70,28 @@ virtqueue_detatch_unused(struct virtqueue *vq)
                }
        return NULL;
 }
+
+/* Flush the elements in the used ring. */
+void
+virtqueue_flush(struct virtqueue *vq)
+{
+       struct vring_used_elem *uep;
+       struct vq_desc_extra *dxp;
+       uint16_t used_idx, desc_idx;
+       uint16_t nb_used, i;
+
+       nb_used = VIRTQUEUE_NUSED(vq);
+
+       for (i = 0; i < nb_used; i++) {
+               used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
+               uep = &vq->vq_ring.used->ring[used_idx];
+               desc_idx = (uint16_t)uep->id;
+               dxp = &vq->vq_descx[desc_idx];
+               if (dxp->cookie != NULL) {
+                       rte_pktmbuf_free(dxp->cookie);
+                       dxp->cookie = NULL;
+               }
+               vq->vq_used_cons_idx++;
+               vq_ring_free_chain(vq, desc_idx);
+       }
+}
index 569c251..ec967a5 100644 (file)
@@ -290,6 +290,9 @@ void virtqueue_dump(struct virtqueue *vq);
  */
 struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);
 
+/* Flush the elements in the used ring. */
+void virtqueue_flush(struct virtqueue *vq);
+
 static inline int
 virtqueue_full(const struct virtqueue *vq)
 {
@@ -298,6 +301,8 @@ virtqueue_full(const struct virtqueue *vq)
 
 #define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
 
+void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
+
 static inline void
 vq_update_avail_idx(struct virtqueue *vq)
 {
index aedac6c..2bd2f27 100644 (file)
@@ -441,10 +441,10 @@ vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
                     addr[0], addr[1], addr[2],
                     addr[3], addr[4], addr[5]);
 
-       val = *(const uint32_t *)addr;
+       memcpy(&val, addr, 4);
        VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);
 
-       val = (addr[5] << 8) | addr[4];
+       memcpy(&val, addr + 4, 2);
        VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
 }
 
@@ -762,6 +762,8 @@ vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
 {
        struct vmxnet3_hw *hw = dev->data->dev_private;
 
+       ether_addr_copy(mac_addr, (struct ether_addr *)(hw->perm_addr));
+       ether_addr_copy(mac_addr, &dev->data->mac_addrs[0]);
        vmxnet3_write_mac(hw, mac_addr->addr_bytes);
 }
 
index 3056f4f..5ef7773 100644 (file)
@@ -194,6 +194,8 @@ vmxnet3_dev_tx_queue_release(void *txq)
                vmxnet3_cmd_ring_release(&tq->cmd_ring);
                /* Release the memzone */
                rte_memzone_free(tq->mz);
+               /* Release the queue */
+               rte_free(tq);
        }
 }
 
@@ -214,6 +216,9 @@ vmxnet3_dev_rx_queue_release(void *rxq)
 
                /* Release the memzone */
                rte_memzone_free(rq->mz);
+
+               /* Release the queue */
+               rte_free(rq);
        }
 }
 
@@ -254,11 +259,9 @@ vmxnet3_dev_rx_queue_reset(void *rxq)
        struct vmxnet3_comp_ring *comp_ring;
        int size;
 
-       if (rq != NULL) {
-               /* Release both the cmd_rings mbufs */
-               for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
-                       vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
-       }
+       /* Release both the cmd_rings mbufs */
+       for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
+               vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
 
        ring0 = &rq->cmd_ring[0];
        ring1 = &rq->cmd_ring[1];
index ec5a2e6..1a2f07e 100644 (file)
@@ -199,7 +199,8 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
                        /* XXX No option headers supported */
                        memmove(ip6, ip, sizeof(struct ip6_hdr));
                        ip6->ip6_nxt = *nexthdr;
-                       ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+                       ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
+                                             sizeof(struct ip6_hdr));
                }
        } else
                ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);
@@ -294,14 +295,15 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
                                sizeof(struct esp_hdr) + sa->iv_len);
                memmove(new_ip, ip4, ip_hdr_len);
                esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
+               ip4 = (struct ip *)new_ip;
                if (likely(ip4->ip_v == IPVERSION)) {
-                       ip4 = (struct ip *)new_ip;
                        ip4->ip_p = IPPROTO_ESP;
                        ip4->ip_len = htons(rte_pktmbuf_data_len(m));
                } else {
                        ip6 = (struct ip6_hdr *)new_ip;
                        ip6->ip6_nxt = IPPROTO_ESP;
-                       ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+                       ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
+                                             sizeof(struct ip6_hdr));
                }
        }
 
index ff1dccd..93393d5 100644 (file)
@@ -72,7 +72,8 @@ ipip_outbound(struct rte_mbuf *m, uint32_t offset, uint32_t is_ipv6,
 
                /* Per RFC4301 5.1.2.1 */
                outip6->ip6_flow = htonl(IP6_VERSION << 28 | ds_ecn << 20);
-               outip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+               outip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
+                                        sizeof(struct ip6_hdr));
 
                outip6->ip6_nxt = IPPROTO_ESP;
                outip6->ip6_hops = IPDEFTTL;
index ae921ad..a7fe6d6 100644 (file)
@@ -40,9 +40,6 @@ endif
 # Default target, can be overridden by command line or environment
 RTE_TARGET ?= x86_64-native-linuxapp-gcc
 
-# Location of PQoS library and includes,
-PQOS_LIBRARY_PATH = $(PQOS_INSTALL_PATH)/libpqos.a
-
 include $(RTE_SDK)/mk/rte.vars.mk
 
 # binary name
@@ -65,6 +62,6 @@ CFLAGS += -I$(PQOS_INSTALL_PATH)/../include
 CFLAGS_cat.o := -D_GNU_SOURCE
 
 LDLIBS += -L$(PQOS_INSTALL_PATH)
-LDLIBS += $(PQOS_LIBRARY_PATH)
+LDLIBS += -lpqos
 
 include $(RTE_SDK)/mk/rte.extapp.mk
index bad3930..f6f8b68 100644 (file)
 static const struct pqos_cap *m_cap;
 static const struct pqos_cpuinfo *m_cpu;
 static const struct pqos_capability *m_cap_l3ca;
+#if PQOS_VERSION <= 103
 static unsigned m_sockets[PQOS_MAX_SOCKETS];
+#else
+static unsigned int *m_sockets;
+#endif
 static unsigned m_sock_count;
 static struct cat_config m_config[PQOS_MAX_CORES];
 static unsigned m_config_count;
@@ -271,16 +275,16 @@ parse_l3ca(const char *l3ca)
                /* scan the separator '@', ','(next) or '\0'(finish) */
                l3ca += strcspn(l3ca, "@,");
 
-               if (*l3ca == '@') {
-                       /* explicit assign cpu_set */
-                       offset = parse_set(l3ca + 1, &cpuset);
-                       if (offset < 0 || CPU_COUNT(&cpuset) == 0)
-                               goto err;
+               if (*l3ca != '@')
+                       goto err;
 
-                       end = l3ca + 1 + offset;
-               } else
+               /* explicit assign cpu_set */
+               offset = parse_set(l3ca + 1, &cpuset);
+               if (offset < 0 || CPU_COUNT(&cpuset) == 0)
                        goto err;
 
+               end = l3ca + 1 + offset;
+
                if (*end != ',' && *end != '\0')
                        goto err;
 
@@ -353,9 +357,6 @@ parse_l3ca(const char *l3ca)
                idx++;
        } while (*end != '\0' && idx < PQOS_MAX_CORES);
 
-       if (m_config_count == 0)
-               goto err;
-
        return 0;
 
 err:
@@ -408,7 +409,11 @@ check_cpus(void)
                                        goto exit;
                                }
 
+#if PQOS_VERSION <= 103
                                ret = pqos_l3ca_assoc_get(cpu_id, &cos_id);
+#else
+                               ret = pqos_alloc_assoc_get(cpu_id, &cos_id);
+#endif
                                if (ret != PQOS_RETVAL_OK) {
                                        printf("PQOS: Failed to read COS "
                                                "associated to cpu %u.\n",
@@ -512,7 +517,11 @@ check_and_select_classes(unsigned cos_id_map[][PQOS_MAX_SOCKETS])
        for (j = 0; j < m_cpu->num_cores; j++) {
                cpu_id = m_cpu->cores[j].lcore;
 
+#if PQOS_VERSION <= 103
                ret = pqos_l3ca_assoc_get(cpu_id, &cos_id);
+#else
+               ret = pqos_alloc_assoc_get(cpu_id, &cos_id);
+#endif
                if (ret != PQOS_RETVAL_OK) {
                        printf("PQOS: Failed to read COS associated to "
                                "cpu %u on phy_pkg %u.\n", cpu_id, phy_pkg_id);
@@ -598,10 +607,19 @@ configure_cat(unsigned cos_id_map[][PQOS_MAX_SOCKETS])
 
                l3ca.cdp = m_config[i].cdp;
                if (m_config[i].cdp == 1) {
+#if PQOS_VERSION <= 103
                        l3ca.code_mask = m_config[i].code_mask;
                        l3ca.data_mask = m_config[i].data_mask;
+#else
+                       l3ca.u.s.code_mask = m_config[i].code_mask;
+                       l3ca.u.s.data_mask = m_config[i].data_mask;
+#endif
                } else
+#if PQOS_VERSION <= 103
                        l3ca.ways_mask = m_config[i].mask;
+#else
+                       l3ca.u.ways_mask = m_config[i].mask;
+#endif
 
                for (j = 0; j < m_sock_count; j++) {
                        phy_pkg_id = m_sockets[j];
@@ -637,7 +655,11 @@ configure_cat(unsigned cos_id_map[][PQOS_MAX_SOCKETS])
 
                        cos_id = cos_id_map[i][phy_pkg_id];
 
+#if PQOS_VERSION <= 103
                        ret = pqos_l3ca_assoc_set(cpu_id, cos_id);
+#else
+                       ret = pqos_alloc_assoc_set(cpu_id, cos_id);
+#endif
                        if (ret != PQOS_RETVAL_OK) {
                                printf("PQOS: Failed to associate COS %u to "
                                        "cpu %u\n", cos_id, cpu_id);
@@ -754,24 +776,43 @@ print_cat_config(void)
                        if (tab[n].cdp == 1) {
                                printf("PQOS: COS: %u, cMASK: 0x%llx, "
                                        "dMASK: 0x%llx\n", tab[n].class_id,
+#if PQOS_VERSION <= 103
                                        (unsigned long long)tab[n].code_mask,
                                        (unsigned long long)tab[n].data_mask);
+#else
+                                       (unsigned long long)tab[n].u.s.code_mask,
+                                       (unsigned long long)tab[n].u.s.data_mask);
+#endif
                        } else {
                                printf("PQOS: COS: %u, MASK: 0x%llx\n",
                                        tab[n].class_id,
+#if PQOS_VERSION <= 103
                                        (unsigned long long)tab[n].ways_mask);
+#else
+                                       (unsigned long long)tab[n].u.ways_mask);
+#endif
                        }
                }
        }
 
        for (i = 0; i < m_sock_count; i++) {
+#if PQOS_VERSION <= 103
                unsigned lcores[PQOS_MAX_SOCKET_CORES] = {0};
+#else
+               unsigned int *lcores = NULL;
+#endif
                unsigned lcount = 0;
                unsigned n = 0;
 
+#if PQOS_VERSION <= 103
                ret = pqos_cpu_get_cores(m_cpu, m_sockets[i],
                                PQOS_MAX_SOCKET_CORES, &lcount, &lcores[0]);
                if (ret != PQOS_RETVAL_OK) {
+#else
+               lcores = pqos_cpu_get_cores(m_cpu, m_sockets[i],
+                               &lcount);
+               if (lcores == NULL || lcount == 0) {
+#endif
                        printf("PQOS: Error retrieving core information!\n");
                        return;
                }
@@ -780,13 +821,21 @@ print_cat_config(void)
                for (n = 0; n < lcount; n++) {
                        unsigned class_id = 0;
 
+#if PQOS_VERSION <= 103
                        ret = pqos_l3ca_assoc_get(lcores[n], &class_id);
+#else
+                       ret = pqos_alloc_assoc_get(lcores[n], &class_id);
+#endif
                        if (ret == PQOS_RETVAL_OK)
                                printf("PQOS: CPU: %u, COS: %u\n", lcores[n],
                                        class_id);
                        else
                                printf("PQOS: CPU: %u, ERROR\n", lcores[n]);
                }
+
+#if PQOS_VERSION > 103
+               free(lcores);
+#endif
        }
 
 }
@@ -849,7 +898,12 @@ cat_fini(void)
        m_cap = NULL;
        m_cpu = NULL;
        m_cap_l3ca = NULL;
+#if PQOS_VERSION <= 103
        memset(m_sockets, 0, sizeof(m_sockets));
+#else
+       if (m_sockets != NULL)
+               free(m_sockets);
+#endif
        m_sock_count = 0;
        memset(m_config, 0, sizeof(m_config));
        m_config_count = 0;
@@ -875,7 +929,11 @@ cat_exit(void)
                        if (CPU_ISSET(cpu_id, &m_config[i].cpumask) == 0)
                                continue;
 
+#if PQOS_VERSION <= 103
                        ret = pqos_l3ca_assoc_set(cpu_id, 0);
+#else
+                       ret = pqos_alloc_assoc_set(cpu_id, 0);
+#endif
                        if (ret != PQOS_RETVAL_OK) {
                                printf("PQOS: Failed to associate COS 0 to "
                                        "cpu %u\n", cpu_id);
@@ -927,7 +985,9 @@ cat_init(int argc, char **argv)
        /* PQoS Initialization - Check and initialize CAT capability */
        cfg.fd_log = STDOUT_FILENO;
        cfg.verbose = 0;
+#if PQOS_VERSION <= 103
        cfg.cdp_cfg = PQOS_REQUIRE_CDP_ANY;
+#endif
        ret = pqos_init(&cfg);
        if (ret != PQOS_RETVAL_OK) {
                printf("PQOS: Error initializing PQoS library!\n");
@@ -953,9 +1013,14 @@ cat_init(int argc, char **argv)
        }
 
        /* Get CPU socket information */
+#if PQOS_VERSION <= 103
        ret = pqos_cpu_get_sockets(m_cpu, PQOS_MAX_SOCKETS, &m_sock_count,
                m_sockets);
        if (ret != PQOS_RETVAL_OK) {
+#else
+       m_sockets = pqos_cpu_get_sockets(m_cpu, &m_sock_count);
+       if (m_sockets == NULL) {
+#endif
                printf("PQOS: Error retrieving CPU socket information!\n");
                ret = -EFAULT;
                goto err;
index b40c49c..3e99b48 100644 (file)
@@ -987,6 +987,7 @@ parse_key(uint8_t *data, char *input_arg)
        unsigned byte_count;
        char *token;
 
+       errno = 0;
        for (byte_count = 0, token = strtok(input_arg, ":");
                        (byte_count < MAX_KEY_SIZE) && (token != NULL);
                        token = strtok(NULL, ":")) {
index 9307d48..0d0d8bf 100644 (file)
@@ -1027,6 +1027,7 @@ add_rules(const char *rule_path,
        char buff[LINE_MAX];
        FILE *fh = fopen(rule_path, "rb");
        unsigned int i = 0;
+       int val;
 
        if (fh == NULL)
                rte_exit(EXIT_FAILURE, "%s: Open %s failed\n", __func__,
@@ -1043,7 +1044,11 @@ add_rules(const char *rule_path,
                rte_exit(EXIT_FAILURE, "Not find any route entries in %s!\n",
                                rule_path);
 
-       fseek(fh, 0, SEEK_SET);
+       val = fseek(fh, 0, SEEK_SET);
+       if (val < 0) {
+               rte_exit(EXIT_FAILURE, "%s: File seek operation failed\n",
+                       __func__);
+       }
 
        acl_rules = calloc(acl_num, rule_size);
 
index 820448b..88b336e 100644 (file)
@@ -1205,10 +1205,7 @@ main(int argc, char **argv)
        message_pool = rte_mempool_create("ms_msg_pool",
                           NB_CORE_MSGBUF * RTE_MAX_LCORE,
                           sizeof(enum l2fwd_cmd), NB_CORE_MSGBUF / 2,
-                          0,
-                          rte_pktmbuf_pool_init, NULL,
-                          rte_pktmbuf_init, NULL,
-                          rte_socket_id(), 0);
+                          0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
 
        if (message_pool == NULL)
                rte_exit(EXIT_FAILURE, "Create msg mempool failed\n");
index 2843d94..53b87c5 100644 (file)
@@ -67,6 +67,7 @@
 #include <rte_mempool.h>
 #include <cmdline_rdline.h>
 #include <cmdline_parse.h>
+#include <cmdline_parse_string.h>
 #include <cmdline_socket.h>
 #include <cmdline.h>
 #include "mp_commands.h"
@@ -76,7 +77,6 @@
 static const char *_MSG_POOL = "MSG_POOL";
 static const char *_SEC_2_PRI = "SEC_2_PRI";
 static const char *_PRI_2_SEC = "PRI_2_SEC";
-const unsigned string_size = 64;
 
 struct rte_ring *send_ring, *recv_ring;
 struct rte_mempool *message_pool;
@@ -121,7 +121,7 @@ main(int argc, char **argv)
                send_ring = rte_ring_create(_PRI_2_SEC, ring_size, rte_socket_id(), flags);
                recv_ring = rte_ring_create(_SEC_2_PRI, ring_size, rte_socket_id(), flags);
                message_pool = rte_mempool_create(_MSG_POOL, pool_size,
-                               string_size, pool_cache, priv_data_sz,
+                               STR_TOKEN_SIZE, pool_cache, priv_data_sz,
                                NULL, NULL, NULL, NULL,
                                rte_socket_id(), flags);
        } else {
index 8da244b..cde3abd 100644 (file)
@@ -78,7 +78,7 @@ static void cmd_send_parsed(void *parsed_result,
 
        if (rte_mempool_get(message_pool, &msg) < 0)
                rte_panic("Failed to get message buffer\n");
-       snprintf((char *)msg, string_size, "%s", res->message);
+       snprintf((char *)msg, STR_TOKEN_SIZE, "%s", res->message);
        if (rte_ring_enqueue(send_ring, msg) < 0) {
                printf("Failed to send message - message discarded\n");
                rte_mempool_put(message_pool, msg);
index 7e9a4ab..452b364 100644 (file)
@@ -34,7 +34,6 @@
 #ifndef _SIMPLE_MP_COMMANDS_H_
 #define _SIMPLE_MP_COMMANDS_H_
 
-extern const unsigned string_size;
 extern struct rte_ring *send_ring;
 extern struct rte_mempool *message_pool;
 extern volatile int quit;
index 8c77af8..01843ba 100644 (file)
@@ -83,7 +83,7 @@ int _lthread_desched_sleep(struct lthread *lt);
 
 void _lthread_free(struct lthread *lt);
 
-struct lthread_sched *_lthread_sched_get(int lcore_id);
+struct lthread_sched *_lthread_sched_get(unsigned int lcore_id);
 
 struct lthread_stack *_stack_alloc(void);
 
index c64c21f..fbda112 100644 (file)
@@ -562,11 +562,14 @@ void lthread_run(void)
  * Return the scheduler for this lcore
  *
  */
-struct lthread_sched *_lthread_sched_get(int lcore_id)
+struct lthread_sched *_lthread_sched_get(unsigned int lcore_id)
 {
-       if (lcore_id > LTHREAD_MAX_LCORES)
-               return NULL;
-       return schedcore[lcore_id];
+       struct lthread_sched *res = NULL;
+
+       if (lcore_id < LTHREAD_MAX_LCORES)
+               res = schedcore[lcore_id];
+
+       return res;
 }
 
 /*
@@ -578,10 +581,9 @@ int lthread_set_affinity(unsigned lcoreid)
        struct lthread *lt = THIS_LTHREAD;
        struct lthread_sched *dest_sched;
 
-       if (unlikely(lcoreid > LTHREAD_MAX_LCORES))
+       if (unlikely(lcoreid >= LTHREAD_MAX_LCORES))
                return POSIX_ERRNO(EINVAL);
 
-
        DIAG_EVENT(lt, LT_DIAG_LTHREAD_AFFINITY, lcoreid, 0);
 
        dest_sched = schedcore[lcoreid];
index 6876f83..e58388b 100644 (file)
@@ -199,11 +199,12 @@ void _lthread_tls_destroy(struct lthread *lt)
 void
 *lthread_getspecific(unsigned int k)
 {
+       void *res = NULL;
 
-       if (k > LTHREAD_MAX_KEYS)
-               return NULL;
+       if (k < LTHREAD_MAX_KEYS)
+               res = THIS_LTHREAD->tls->data[k];
 
-       return THIS_LTHREAD->tls->data[k];
+       return res;
 }
 
 /*
@@ -213,7 +214,7 @@ void
  */
 int lthread_setspecific(unsigned int k, const void *data)
 {
-       if (k > LTHREAD_MAX_KEYS)
+       if (k >= LTHREAD_MAX_KEYS)
                return POSIX_ERRNO(EINVAL);
 
        int n = THIS_LTHREAD->tls->nb_keys_inuse;
index 850b009..febae39 100644 (file)
@@ -161,6 +161,7 @@ static void initial_lthread(void *args __attribute__((unused)))
        pthread_override_set(1);
 
        uint64_t i;
+       int ret;
 
        /* initialize mutex for shared counter */
        print_count = 0;
@@ -187,7 +188,10 @@ static void initial_lthread(void *args __attribute__((unused)))
                pthread_attr_setaffinity_np(&attr, sizeof(rte_cpuset_t), &cpuset);
 
                /* create the thread */
-               pthread_create(&tid[i], &attr, helloworld_pthread, (void *) i);
+               ret = pthread_create(&tid[i], &attr,
+                               helloworld_pthread, (void *) i);
+               if (ret != 0)
+                       rte_exit(EXIT_FAILURE, "Cannot create helloworld thread\n");
        }
 
        /* wait for 1s to allow threads
index fe0221c..51ba5fb 100644 (file)
@@ -116,6 +116,7 @@ app_init_port(uint8_t portid, struct rte_mempool *mp)
        rx_conf.rx_thresh.wthresh = rx_thresh.wthresh;
        rx_conf.rx_free_thresh = 32;
        rx_conf.rx_drop_en = 0;
+       rx_conf.rx_deferred_start = 0;
 
        tx_conf.tx_thresh.pthresh = tx_thresh.pthresh;
        tx_conf.tx_thresh.hthresh = tx_thresh.hthresh;
@@ -123,6 +124,7 @@ app_init_port(uint8_t portid, struct rte_mempool *mp)
        tx_conf.tx_free_thresh = 0;
        tx_conf.tx_rs_thresh = 0;
        tx_conf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS;
+       tx_conf.tx_deferred_start = 0;
 
        /* init port */
        RTE_LOG(INFO, APP, "Initializing port %"PRIu8"... ", portid);
index a9c47be..d749165 100644 (file)
@@ -205,7 +205,8 @@ cmdline_printf(const struct cmdline *cl, const char *fmt, ...)
        }
        if (ret >= BUFSIZ)
                ret = BUFSIZ - 1;
-       write(cl->s_out, buf, ret);
+       ret = write(cl->s_out, buf, ret);
+       (void)ret;
        free(buf);
 #endif
 }
index b496067..28a7933 100644 (file)
@@ -149,7 +149,7 @@ match_inst(cmdline_parse_inst_t *inst, const char *buf,
           unsigned int nb_match_token, void *resbuf, unsigned resbuf_size)
 {
        unsigned int token_num=0;
-       cmdline_parse_token_hdr_t * token_p;
+       cmdline_parse_token_hdr_t *token_p = NULL;
        unsigned int i=0;
        int n = 0;
        struct cmdline_token_hdr token_hdr;
index 9019518..0ec8e41 100644 (file)
@@ -141,6 +141,7 @@ __rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type type)
 
                __rte_crypto_sym_op_reset(op->sym);
                break;
+       case RTE_CRYPTO_OP_TYPE_UNDEFINED:
        default:
                break;
        }
index 836e483..f7e340d 100644 (file)
@@ -117,3 +117,32 @@ rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
        RTE_SET_USED(intr_handle);
        return 0;
 }
+
+int
+rte_epoll_wait(int epfd, struct rte_epoll_event *events,
+               int maxevents, int timeout)
+{
+       RTE_SET_USED(epfd);
+       RTE_SET_USED(events);
+       RTE_SET_USED(maxevents);
+       RTE_SET_USED(timeout);
+
+       return -ENOTSUP;
+}
+
+int
+rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
+{
+       RTE_SET_USED(epfd);
+       RTE_SET_USED(op);
+       RTE_SET_USED(fd);
+       RTE_SET_USED(event);
+
+       return -ENOTSUP;
+}
+
+int
+rte_intr_tls_epfd(void)
+{
+       return -ENOTSUP;
+}
index 2f81f7c..ca4ac27 100644 (file)
@@ -117,11 +117,14 @@ DPDK_2.1 {
        rte_eal_pci_detach;
        rte_eal_pci_read_config;
        rte_eal_pci_write_config;
+       rte_epoll_ctl;
+       rte_epoll_wait;
        rte_intr_allow_others;
        rte_intr_dp_is_en;
        rte_intr_efd_disable;
        rte_intr_efd_enable;
        rte_intr_rx_ctl;
+       rte_intr_tls_epfd;
        rte_memzone_free;
 
 } DPDK_2.0;
index 79160a6..35338ef 100644 (file)
@@ -137,7 +137,7 @@ rte_cpu_get_features(hwcap_registers_t out)
        _Elfx_auxv_t auxv;
 
        auxv_fd = open("/proc/self/auxv", O_RDONLY);
-       assert(auxv_fd);
+       assert(auxv_fd != -1);
        while (read(auxv_fd, &auxv, sizeof(auxv)) == sizeof(auxv)) {
                if (auxv.a_type == AT_HWCAP) {
                        out[REG_HWCAP] = auxv.a_un.a_val;
index fcf96e0..970a61c 100644 (file)
@@ -108,7 +108,7 @@ rte_cpu_get_features(hwcap_registers_t out)
        Elf64_auxv_t auxv;
 
        auxv_fd = open("/proc/self/auxv", O_RDONLY);
-       assert(auxv_fd);
+       assert(auxv_fd != -1);
        while (read(auxv_fd, &auxv,
                sizeof(Elf64_auxv_t)) == sizeof(Elf64_auxv_t)) {
                if (auxv.a_type == AT_HWCAP)
index 2e04c75..fb3abf1 100644 (file)
@@ -81,7 +81,7 @@ rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
                        : "memory" );           /* no-clobber list */
 #else
        asm volatile (
-            "mov %%ebx, %%edi\n"
+            "xchgl %%ebx, %%edi;\n"
                        MPLOCKED
                        "cmpxchg8b (%[dst]);"
                        "setz %[res];"
index 9b9635d..e92737d 100644 (file)
@@ -66,7 +66,7 @@ extern "C" {
 /**
  * Patch level number i.e. the z in yy.mm.z
  */
-#define RTE_VER_MINOR 3
+#define RTE_VER_MINOR 4
 
 /**
  * Extra string to be appended to version number
index 08516af..77a8615 100644 (file)
@@ -275,14 +275,14 @@ malloc_elem_free(struct malloc_elem *elem)
                return -1;
 
        rte_spinlock_lock(&(elem->heap->lock));
-       size_t sz = elem->size - sizeof(*elem);
+       size_t sz = elem->size - sizeof(*elem) - MALLOC_ELEM_TRAILER_LEN;
        uint8_t *ptr = (uint8_t *)&elem[1];
        struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size);
        if (next->state == ELEM_FREE){
                /* remove from free list, join to this one */
                elem_free_list_remove(next);
                join_elem(elem, next);
-               sz += sizeof(*elem);
+               sz += (sizeof(*elem) + MALLOC_ELEM_TRAILER_LEN);
        }
 
        /* check if previous element is free, if so join with it and return,
@@ -291,8 +291,8 @@ malloc_elem_free(struct malloc_elem *elem)
        if (elem->prev != NULL && elem->prev->state == ELEM_FREE) {
                elem_free_list_remove(elem->prev);
                join_elem(elem->prev, elem);
-               sz += sizeof(*elem);
-               ptr -= sizeof(*elem);
+               sz += (sizeof(*elem) + MALLOC_ELEM_TRAILER_LEN);
+               ptr -= (sizeof(*elem) + MALLOC_ELEM_TRAILER_LEN);
                elem = elem->prev;
        }
        malloc_elem_free_list_insert(elem);
index 2075282..59ed788 100644 (file)
@@ -570,11 +570,11 @@ eal_parse_args(int argc, char **argv)
                        break;
 
                case OPT_HUGE_DIR_NUM:
-                       internal_config.hugepage_dir = optarg;
+                       internal_config.hugepage_dir = strdup(optarg);
                        break;
 
                case OPT_FILE_PREFIX_NUM:
-                       internal_config.hugefile_prefix = optarg;
+                       internal_config.hugefile_prefix = strdup(optarg);
                        break;
 
                case OPT_SOCKET_MEM_NUM:
index aac05d7..4228067 100644 (file)
@@ -154,7 +154,7 @@ pci_get_uio_dev(struct rte_pci_device *dev, char *dstbuf,
                           unsigned int buflen, int create)
 {
        struct rte_pci_addr *loc = &dev->addr;
-       unsigned int uio_num;
+       int uio_num = -1;
        struct dirent *e;
        DIR *dir;
        char dirname[PATH_MAX];
index fb4a2f8..f288917 100644 (file)
@@ -301,7 +301,8 @@ vfio_mp_sync_thread(void __rte_unused * arg)
                                vfio_mp_sync_send_request(conn_sock, SOCKET_ERR);
                        else
                                vfio_mp_sync_send_fd(conn_sock, fd);
-                       close(fd);
+                       if (fd >= 0)
+                               close(fd);
                        break;
                case SOCKET_REQ_GROUP:
                        /* wait for group number */
index d96275a..3f8c0bc 100644 (file)
@@ -8,6 +8,34 @@
 #define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b))
 #endif
 
+/* SuSE version macro is the same as Linux kernel version */
+#ifndef SLE_VERSION
+#define SLE_VERSION(a, b, c) KERNEL_VERSION(a, b, c)
+#endif
+#ifdef CONFIG_SUSE_KERNEL
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 57))
+/* SLES12SP3 is at least 4.4.57+ based */
+#define SLE_VERSION_CODE SLE_VERSION(12, 3, 0)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 28))
+/* SLES12 is at least 3.12.28+ based */
+#define SLE_VERSION_CODE SLE_VERSION(12, 0, 0)
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 61)) && \
+       (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)))
+/* SLES11 SP3 is at least 3.0.61+ based */
+#define SLE_VERSION_CODE SLE_VERSION(11, 3, 0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 32))
+/* SLES11 SP1 is 2.6.32 based */
+#define SLE_VERSION_CODE SLE_VERSION(11, 1, 0)
+#elif (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 27))
+/* SLES11 GA is 2.6.27 based */
+#define SLE_VERSION_CODE SLE_VERSION(11, 0, 0)
+#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
+#endif /* CONFIG_SUSE_KERNEL */
+#ifndef SLE_VERSION_CODE
+#define SLE_VERSION_CODE 0
+#endif /* SLE_VERSION_CODE */
+
+
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && \
        (!(defined(RHEL_RELEASE_CODE) && \
           RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)))
 #define HAVE_SK_ALLOC_KERN_PARAM
 #endif
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) || \
+       (defined(RHEL_RELEASE_CODE) && \
+        RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) || \
+       (SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(12, 3, 0))
 #define HAVE_TRANS_START_HELPER
 #endif
 
index f4dca5a..acb1a69 100644 (file)
@@ -1031,8 +1031,15 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
                        for (i = 0; i < numvecs; i++)
                                adapter->msix_entries[i].entry = i;
 
+#ifdef HAVE_PCI_ENABLE_MSIX
                        err = pci_enable_msix(pdev,
                                              adapter->msix_entries, numvecs);
+#else
+                       err = pci_enable_msix_range(pdev,
+                                       adapter->msix_entries,
+                                       numvecs,
+                                       numvecs);
+#endif
                        if (err == 0)
                                break;
                }
index 84826b2..aea253b 100644 (file)
@@ -3933,4 +3933,8 @@ skb_set_hash(struct sk_buff *skb, __u32 hash, __always_unused int type)
 #define HAVE_VF_VLAN_PROTO
 #endif /* >= 4.9.0 */
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
+#define HAVE_PCI_ENABLE_MSIX
+#endif
+
 #endif /* _KCOMPAT_H_ */
index 51db006..fb462d6 100644 (file)
@@ -417,9 +417,9 @@ rte_hash_reset(struct rte_hash *h)
 
 /* Search for an entry that can be pushed to its alternative location */
 static inline int
-make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt)
+make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt,
+               unsigned int *nr_pushes)
 {
-       static unsigned int nr_pushes;
        unsigned i, j;
        int ret;
        uint32_t next_bucket_idx;
@@ -456,15 +456,14 @@ make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt)
                        break;
 
        /* All entries have been pushed, so entry cannot be added */
-       if (i == RTE_HASH_BUCKET_ENTRIES || nr_pushes > RTE_HASH_MAX_PUSHES)
+       if (i == RTE_HASH_BUCKET_ENTRIES || ++(*nr_pushes) > RTE_HASH_MAX_PUSHES)
                return -ENOSPC;
 
        /* Set flag to indicate that this entry is going to be pushed */
        bkt->flag[i] = 1;
 
-       nr_pushes++;
        /* Need room in alternative bucket to insert the pushed entry */
-       ret = make_space_bucket(h, next_bkt[i]);
+       ret = make_space_bucket(h, next_bkt[i], nr_pushes);
        /*
         * After recursive function.
         * Clear flags and insert the pushed entry
@@ -472,7 +471,6 @@ make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt)
         * or return error
         */
        bkt->flag[i] = 0;
-       nr_pushes = 0;
        if (ret >= 0) {
                next_bkt[i]->sig_alt[ret] = bkt->sig_current[i];
                next_bkt[i]->sig_current[ret] = bkt->sig_alt[i];
@@ -515,6 +513,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
        unsigned n_slots;
        unsigned lcore_id;
        struct lcore_cache *cached_free_slots = NULL;
+       unsigned int nr_pushes = 0;
 
        if (h->add_key == ADD_KEY_MULTIWRITER)
                rte_spinlock_lock(h->multiwriter_lock);
@@ -643,7 +642,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
                 * if successful or return error and
                 * store the new slot back in the ring
                 */
-               ret = make_space_bucket(h, prim_bkt);
+               ret = make_space_bucket(h, prim_bkt, &nr_pushes);
                if (ret >= 0) {
                        prim_bkt->sig_current[ret] = sig;
                        prim_bkt->sig_alt[ret] = alt_hash;
index 32fdba0..f9fecbe 100644 (file)
@@ -511,7 +511,7 @@ rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
                uint8_t next_hop)
 {
        struct rte_lpm6_tbl_entry *tbl;
-       struct rte_lpm6_tbl_entry *tbl_next;
+       struct rte_lpm6_tbl_entry *tbl_next = NULL;
        int32_t rule_index;
        int status;
        uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
index a8c7aff..a3ca040 100644 (file)
@@ -396,6 +396,7 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
        if ((layers & RTE_PTYPE_INNER_L2_MASK) == 0)
                return pkt_type;
 
+       hdr_lens->inner_l2_len = 0;
        if (proto == rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
                eh = rte_pktmbuf_read(m, off, sizeof(*eh), &eh_copy);
                if (unlikely(eh == NULL))
index 5968683..e64bf59 100644 (file)
@@ -154,6 +154,8 @@ pdump_pktmbuf_copy(struct rte_mbuf *m, struct rte_mempool *mp)
        do {
                nseg++;
                if (pdump_pktmbuf_copy_data(seg, m) < 0) {
+                       if (seg != m_dup)
+                               rte_pktmbuf_free_seg(seg);
                        rte_pktmbuf_free(m_dup);
                        return NULL;
                }
index 32b8c8d..c1b311a 100644 (file)
@@ -450,6 +450,12 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
                n = max;
 
                prod_head = r->prod.head;
+
+               /* add rmb barrier to avoid load/load reorder in weak
+                * memory model. It is noop on x86
+                */
+               rte_smp_rmb();
+
                cons_tail = r->cons.tail;
                /* The subtraction is done between two unsigned 32bits value
                 * (the result is always modulo 32 bits even if we have
@@ -642,6 +648,12 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
                n = max;
 
                cons_head = r->cons.head;
+
+               /* add rmb barrier to avoid load/load reorder in weak
+                * memory model. It is noop on x86
+                */
+               rte_smp_rmb();
+
                prod_tail = r->prod.tail;
                /* The subtraction is done between two unsigned 32bits value
                 * (the result is always modulo 32 bits even if we have
index 18782fa..29ae2a3 100644 (file)
@@ -525,7 +525,7 @@ void rte_timer_manage(void)
                return;
        cur_time = rte_get_timer_cycles();
 
-#ifdef RTE_ARCH_X86_64
+#ifdef RTE_ARCH_64
        /* on 64-bit the value cached in the pending_head.expired will be
         * updated atomically, so we can consult that for a quick check here
         * outside the lock */
index aab74cd..638be79 100644 (file)
@@ -30,7 +30,7 @@
 # OF THE POSSIBILITY OF SUCH DAMAGE.
 
 Name: dpdk
-Version: 16.11.3
+Version: 16.11.4
 Release: 1
 Packager: packaging@6wind.com
 URL: http://dpdk.org
index 4356d7e..cb8bce9 100755 (executable)
@@ -114,9 +114,9 @@ printf "\
 " "$include" "$code" > "${temp}" &&
 if ${CC} ${CPPFLAGS} ${EXTRA_CPPFLAGS} ${CFLAGS} ${EXTRA_CFLAGS} \
        ${AUTO_CONFIG_CFLAGS} \
-       -c -o /dev/null "${temp}" 1>&${out} 2>&${err}
+       -c -o ${temp}.o "${temp}" 1>&${out} 2>&${err}
 then
-       rm -f "${temp}"
+       rm -f "${temp}" "${temp}.o"
        printf "\
 #ifndef %s
 #define %s 1
@@ -125,7 +125,7 @@ then
 " "${macro}" "${macro}" "${macro}" >> "${file}" &&
        printf 'Defining %s.\n' "${macro}"
 else
-       rm -f "${temp}"
+       rm -f "${temp}" "${temp}.o"
        printf "\
 /* %s is not defined. */
 
index fef59c4..e4c6d52 100755 (executable)
@@ -182,8 +182,7 @@ def check_modules():
 
         # special case for vfio_pci (module is named vfio-pci,
         # but its .ko is named vfio_pci)
-        sysfs_mods = map(lambda a:
-                         a if a != 'vfio_pci' else 'vfio-pci', sysfs_mods)
+        sysfs_mods = [a if a != 'vfio_pci' else 'vfio-pci' for a in sysfs_mods]
 
         for mod in mods:
             if mod["Name"] in sysfs_mods: