Imported Upstream version 16.11.1
author Christian Ehrhardt <christian.ehrhardt@canonical.com>
Thu, 2 Mar 2017 15:15:51 +0000 (16:15 +0100)
committer Christian Ehrhardt <christian.ehrhardt@canonical.com>
Fri, 3 Mar 2017 13:41:36 +0000 (14:41 +0100)
Change-Id: I1e965265578efaaf08e5628607f53d2386d2df9f
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
100 files changed:
app/test-pmd/Makefile
app/test-pmd/cmdline.c
app/test/test_cryptodev_perf.c
buildtools/pmdinfogen/pmdinfogen.c
buildtools/pmdinfogen/pmdinfogen.h
doc/guides/cryptodevs/openssl.rst
doc/guides/nics/features/virtio.ini
doc/guides/rel_notes/release_16_11.rst
drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
drivers/crypto/kasumi/rte_kasumi_pmd.c
drivers/crypto/openssl/rte_openssl_pmd.c
drivers/crypto/qat/qat_crypto.c
drivers/crypto/qat/qat_crypto.h
drivers/crypto/snow3g/rte_snow3g_pmd.c
drivers/crypto/zuc/rte_zuc_pmd.c
drivers/net/af_packet/rte_eth_af_packet.c
drivers/net/bnx2x/bnx2x.c
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_ethdev.c
drivers/net/bnx2x/bnx2x_vfpf.c
drivers/net/cxgbe/base/t4_hw.c
drivers/net/cxgbe/cxgbe_main.c
drivers/net/ena/base/ena_com.c
drivers/net/ena/ena_ethdev.c
drivers/net/enic/enic.h
drivers/net/enic/enic_clsf.c
drivers/net/enic/enic_main.c
drivers/net/enic/enic_res.c
drivers/net/enic/enic_res.h
drivers/net/enic/enic_rxtx.c
drivers/net/fm10k/fm10k_ethdev.c
drivers/net/i40e/i40e_ethdev.c
drivers/net/i40e/i40e_ethdev.h
drivers/net/i40e/i40e_ethdev_vf.c
drivers/net/i40e/i40e_pf.c
drivers/net/i40e/i40e_pf.h
drivers/net/i40e/i40e_rxtx.c
drivers/net/i40e/i40e_rxtx_vec_common.h
drivers/net/i40e/i40e_rxtx_vec_sse.c
drivers/net/ixgbe/ixgbe_ethdev.c
drivers/net/ixgbe/ixgbe_ethdev.h
drivers/net/ixgbe/ixgbe_rxtx.c
drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
drivers/net/mlx4/mlx4.c
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5_ethdev.c
drivers/net/mlx5/mlx5_prm.h
drivers/net/mlx5/mlx5_rxtx.c
drivers/net/mlx5/mlx5_rxtx.h
drivers/net/mlx5/mlx5_txq.c
drivers/net/nfp/nfp_net.c
drivers/net/pcap/rte_eth_pcap.c
drivers/net/qede/base/ecore_init_fw_funcs.c
drivers/net/qede/base/reg_addr.h
drivers/net/qede/qede_eth_if.c
drivers/net/qede/qede_eth_if.h
drivers/net/qede/qede_ethdev.c
drivers/net/qede/qede_ethdev.h
drivers/net/qede/qede_main.c
drivers/net/qede/qede_rxtx.c
drivers/net/vhost/rte_eth_vhost.c
drivers/net/virtio/virtio_ethdev.c
drivers/net/virtio/virtio_ethdev.h
drivers/net/virtio/virtio_pci.c
drivers/net/virtio/virtio_pci.h
drivers/net/virtio/virtio_rxtx.c
drivers/net/virtio/virtio_user/virtio_user_dev.c
drivers/net/virtio/virtio_user/virtio_user_dev.h
drivers/net/virtio/virtio_user_ethdev.c
drivers/net/virtio/virtqueue.h
drivers/net/vmxnet3/vmxnet3_rxtx.c
examples/ethtool/ethtool-app/ethapp.c
examples/ethtool/lib/rte_ethtool.c
examples/ip_pipeline/app.h
examples/ip_pipeline/init.c
examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
examples/ip_pipeline/thread_fe.c
examples/l2fwd-crypto/main.c
examples/vhost/main.c
lib/librte_cryptodev/rte_cryptodev.c
lib/librte_cryptodev/rte_cryptodev_pmd.h
lib/librte_eal/common/eal_common_devargs.c
lib/librte_eal/common/eal_common_vdev.c
lib/librte_eal/common/include/rte_version.h
lib/librte_eal/linuxapp/eal/eal_pci_uio.c
lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c
lib/librte_ether/rte_ethdev.c
lib/librte_ether/rte_ether_version.map
lib/librte_mempool/rte_mempool.h
lib/librte_mempool/rte_mempool_stack.c
lib/librte_sched/rte_sched.c
lib/librte_vhost/fd_man.c
lib/librte_vhost/fd_man.h
lib/librte_vhost/vhost.c
lib/librte_vhost/vhost_user.c
lib/librte_vhost/virtio_net.c
pkg/dpdk.spec
tools/dpdk-devbind.py

diff --git a/app/test-pmd/Makefile b/app/test-pmd/Makefile
index 891b85a..92c0c1b 100644
@@ -58,7 +58,9 @@ SRCS-y += csumonly.c
 SRCS-y += icmpecho.c
 SRCS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ieee1588fwd.c
 
+ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
 _LDLIBS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += -lrte_pmd_ixgbe
+endif
 
 CFLAGS_cmdline.o := -D_GNU_SOURCE
 
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 63b55dc..315a252 100644
@@ -10807,6 +10807,9 @@ cmd_set_vf_vlan_anti_spoof_parsed(
        int ret = 0;
        int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
 
+       if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+               return;
+
        ret = rte_pmd_ixgbe_set_vf_vlan_anti_spoof(res->port_id, res->vf_id,
                        is_on);
        switch (ret) {
@@ -10892,6 +10895,9 @@ cmd_set_vf_mac_anti_spoof_parsed(
        int ret;
        int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
 
+       if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+               return;
+
        ret = rte_pmd_ixgbe_set_vf_mac_anti_spoof(res->port_id, res->vf_id,
                        is_on);
        switch (ret) {
@@ -10977,6 +10983,9 @@ cmd_set_vf_vlan_stripq_parsed(
        int ret = 0;
        int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
 
+       if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+               return;
+
        ret = rte_pmd_ixgbe_set_vf_vlan_stripq(res->port_id, res->vf_id, is_on);
        switch (ret) {
        case 0:
@@ -11060,6 +11069,9 @@ cmd_set_vf_vlan_insert_parsed(
        struct cmd_vf_vlan_insert_result *res = parsed_result;
        int ret;
 
+       if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+               return;
+
        ret = rte_pmd_ixgbe_set_vf_vlan_insert(res->port_id, res->vf_id, res->vlan_id);
        switch (ret) {
        case 0:
@@ -11134,6 +11146,9 @@ cmd_set_tx_loopback_parsed(
        int ret;
        int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
 
+       if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+               return;
+
        ret = rte_pmd_ixgbe_set_tx_loopback(res->port_id, is_on);
        switch (ret) {
        case 0:
@@ -11211,6 +11226,9 @@ cmd_set_all_queues_drop_en_parsed(
        int ret = 0;
        int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
 
+       if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+               return;
+
        ret = rte_pmd_ixgbe_set_all_queues_drop_en(res->port_id, is_on);
        switch (ret) {
        case 0:
@@ -11294,6 +11312,9 @@ cmd_set_vf_split_drop_en_parsed(
        int ret;
        int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
 
+       if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+               return;
+
        ret = rte_pmd_ixgbe_set_vf_split_drop_en(res->port_id, res->vf_id,
                        is_on);
        switch (ret) {
@@ -11378,6 +11399,9 @@ cmd_set_vf_mac_addr_parsed(
        struct cmd_set_vf_mac_addr_result *res = parsed_result;
        int ret;
 
+       if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+               return;
+
        ret = rte_pmd_ixgbe_set_vf_mac_addr(res->port_id, res->vf_id,
                        &res->mac_addr);
        switch (ret) {
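
Every handler in this file gains the same guard. A minimal sketch of the pattern, reusing the names visible in the hunks (port_id_is_invalid() and ENABLED_WARN are testpmd's own; the handler body is simplified):

    /* Reject an out-of-range or unattached port before the call
     * reaches ixgbe-private state. */
    static void
    cmd_example_parsed(void *parsed_result)
    {
            struct cmd_set_vf_mac_addr_result *res = parsed_result;

            if (port_id_is_invalid(res->port_id, ENABLED_WARN))
                    return; /* warning already printed */

            if (rte_pmd_ixgbe_set_vf_mac_addr(res->port_id, res->vf_id,
                            &res->mac_addr) < 0)
                    printf("bad vf_id or port is not an ixgbe device\n");
    }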
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 59a6891..89a6795 100644
@@ -2216,6 +2216,7 @@ test_perf_snow3G_optimise_cyclecount(struct perf_test_params *pparams)
                rte_pktmbuf_free(c_ops[i]->sym->m_src);
                rte_crypto_op_free(c_ops[i]);
        }
+       rte_cryptodev_sym_session_free(ts_params->dev_id, sess);
 
        return TEST_SUCCESS;
 }
@@ -2418,6 +2419,7 @@ test_perf_openssl_optimise_cyclecount(struct perf_test_params *pparams)
                rte_pktmbuf_free(c_ops[i]->sym->m_src);
                rte_crypto_op_free(c_ops[i]);
        }
+       rte_cryptodev_sym_session_free(ts_params->dev_id, sess);
 
        return TEST_SUCCESS;
 }
@@ -3039,6 +3041,7 @@ test_perf_aes_sha(uint8_t dev_id, uint16_t queue_id,
 
        for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
                rte_pktmbuf_free(mbufs[i]);
+       rte_cryptodev_sym_session_free(dev_id, sess);
 
        printf("\n");
        return TEST_SUCCESS;
@@ -3202,6 +3205,7 @@ test_perf_snow3g(uint8_t dev_id, uint16_t queue_id,
 
        for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
                rte_pktmbuf_free(mbufs[i]);
+       rte_cryptodev_sym_session_free(dev_id, sess);
 
        printf("\n");
        return TEST_SUCCESS;
@@ -3351,6 +3355,7 @@ test_perf_openssl(uint8_t dev_id, uint16_t queue_id,
 
        for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
                rte_pktmbuf_free(mbufs[i]);
+       rte_cryptodev_sym_session_free(dev_id, sess);
 
        printf("\n");
        return TEST_SUCCESS;
@@ -3956,6 +3961,7 @@ perf_AES_GCM(uint8_t dev_id, uint16_t queue_id,
 
        for (i = 0; i < burst; i++)
                rte_pktmbuf_free(mbufs[i]);
+       rte_cryptodev_sym_session_free(dev_id, sess);
 
        return 0;
 }
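
All six hunks in this file plug the same leak by pairing every session create with a free. A hedged sketch of the lifecycle, using the 16.11-era API seen above (dev_id and xform stand in for the test's real parameters):

    struct rte_cryptodev_sym_session *sess;

    sess = rte_cryptodev_sym_session_create(dev_id, &xform);
    if (sess == NULL)
            return TEST_FAILED;

    /* ... enqueue/dequeue bursts of crypto ops referencing sess ... */

    /* Exactly one free per create, or the PMD's fixed-size session
     * mempool drains after enough repeated test runs. */
    rte_cryptodev_sym_session_free(dev_id, sess);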
diff --git a/buildtools/pmdinfogen/pmdinfogen.c b/buildtools/pmdinfogen/pmdinfogen.c
index 59ab956..5bf08ce 100644
@@ -226,13 +226,14 @@ static int parse_elf(struct elf_info *info, const char *filename)
        }
        if (!info->symtab_start)
                fprintf(stderr, "%s has no symtab?\n", filename);
-
-       /* Fix endianness in symbols */
-       for (sym = info->symtab_start; sym < info->symtab_stop; sym++) {
-               sym->st_shndx = TO_NATIVE(endian, 16, sym->st_shndx);
-               sym->st_name  = TO_NATIVE(endian, 32, sym->st_name);
-               sym->st_value = TO_NATIVE(endian, ADDR_SIZE, sym->st_value);
-               sym->st_size  = TO_NATIVE(endian, ADDR_SIZE, sym->st_size);
+       else {
+               /* Fix endianness in symbols */
+               for (sym = info->symtab_start; sym < info->symtab_stop; sym++) {
+                       sym->st_shndx = TO_NATIVE(endian, 16, sym->st_shndx);
+                       sym->st_name  = TO_NATIVE(endian, 32, sym->st_name);
+                       sym->st_value = TO_NATIVE(endian, ADDR_SIZE, sym->st_value);
+                       sym->st_size  = TO_NATIVE(endian, ADDR_SIZE, sym->st_size);
+               }
        }
 
        if (symtab_shndx_idx != ~0U) {
diff --git a/buildtools/pmdinfogen/pmdinfogen.h b/buildtools/pmdinfogen/pmdinfogen.h
index 1da2966..e9eabff 100644
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <sys/mman.h>
+#ifdef __linux__
+#include <endian.h>
+#else
+#include <sys/endian.h>
+#endif
 #include <fcntl.h>
 #include <unistd.h>
 #include <elf.h>
 #include <rte_config.h>
 #include <rte_pci.h>
-#include <rte_byteorder.h>
 
 /* On BSD-alike OSes elf.h defines these according to host's word size */
 #undef ELF_ST_BIND
@@ -75,9 +79,9 @@
 #define CONVERT_NATIVE(fend, width, x) ({ \
 typeof(x) ___x; \
 if ((fend) == ELFDATA2LSB) \
-       ___x = rte_le_to_cpu_##width(x); \
+       ___x = le##width##toh(x); \
 else \
-       ___x = rte_be_to_cpu_##width(x); \
+       ___x = be##width##toh(x); \
        ___x; \
 })
 
diff --git a/doc/guides/cryptodevs/openssl.rst b/doc/guides/cryptodevs/openssl.rst
index d2b5906..f1c39ba 100644
@@ -98,15 +98,15 @@ To verify real traffic l2fwd-crypto example can be used with this command:
 
 .. code-block:: console
 
-sudo ./build/l2fwd-crypto -c 0x3 -n 4 --vdev "crypto_openssl"
---vdev "crypto_openssl"-- -p 0x3 --chain CIPHER_HASH
---cipher_op ENCRYPT --cipher_algo AES_CBC
---cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
---iv 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:ff
---auth_op GENERATE --auth_algo SHA1_HMAC
---auth_key 11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
-:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
-:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+       sudo ./build/l2fwd-crypto -c 0x3 -n 4 --vdev "crypto_openssl"
+       --vdev "crypto_openssl"-- -p 0x3 --chain CIPHER_HASH
+       --cipher_op ENCRYPT --cipher_algo AES_CBC
+       --cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
+       --iv 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:ff
+       --auth_op GENERATE --auth_algo SHA1_HMAC
+       --auth_key 11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+       :11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+       :11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
 
 Limitations
 -----------
diff --git a/doc/guides/nics/features/virtio.ini b/doc/guides/nics/features/virtio.ini
index 41830c1..1d996c6 100644
@@ -14,6 +14,7 @@ Multicast MAC filter = Y
 VLAN filter          = Y
 Basic stats          = Y
 Stats per queue      = Y
+Multiprocess aware   = Y
 BSD nic_uio          = Y
 Linux UIO            = Y
 Linux VFIO           = Y
diff --git a/doc/guides/rel_notes/release_16_11.rst b/doc/guides/rel_notes/release_16_11.rst
index 8c9ec65..fbf2e36 100644
@@ -598,3 +598,113 @@ Tested OSes
 * Ubuntu 14.04
 * Ubuntu 15.04
 * Ubuntu 16.04
+
+Fixes in 16.11 LTS Release
+--------------------------
+
+16.11.1
+~~~~~~~
+
+* app/test: fix symmetric session free in crypto perf tests
+* app/testpmd: fix check for invalid ports
+* app/testpmd: fix static build link ordering
+* crypto/aesni_gcm: fix IV size in capabilities
+* crypto/aesni_gcm: fix J0 padding bytes
+* crypto/aesni_mb: fix incorrect crypto session
+* crypto/openssl: fix extra bytes written at end of data
+* crypto/openssl: fix indentation in guide
+* crypto/qat: fix IV size in capabilities
+* crypto/qat: fix to avoid buffer overwrite in OOP case
+* cryptodev: fix crash on null dereference
+* cryptodev: fix loop in device query
+* devargs: reset driver name pointer on parsing failure
+* drivers/crypto: fix different auth/cipher keys
+* ethdev: check maximum number of queues for statistics
+* ethdev: fix extended statistics name index
+* ethdev: fix port data mismatched in multiple process model
+* ethdev: fix port lookup if none
+* ethdev: remove invalid function from version map
+* examples/ethtool: fix driver information
+* examples/ethtool: fix querying non-PCI devices
+* examples/ip_pipeline: fix coremask limitation
+* examples/ip_pipeline: fix parsing of pass-through pipeline
+* examples/l2fwd-crypto: fix overflow
+* examples/vhost: fix calculation of mbuf count
+* examples/vhost: fix lcore initialization
+* mempool: fix API documentation
+* mempool: fix stack handler dequeue
+* net/af_packet: fix fd use after free
+* net/bnx2x: fix Rx mode configuration
+* net/cxgbe/base: initialize variable before reading EEPROM
+* net/cxgbe: fix parenthesis on bitwise operation
+* net/ena: fix setting host attributes
+* net/enic: fix hardcoding of some flow director masks
+* net/enic: fix memory leak with oversized Tx packets
+* net/enic: remove unnecessary function parameter attributes
+* net/i40e: enable auto link update for 25G
+* net/i40e: fix Rx checksum flag
+* net/i40e: fix TC bandwidth definition
+* net/i40e: fix VF reset flow
+* net/i40e: fix checksum flag in x86 vector Rx
+* net/i40e: fix crash in close
+* net/i40e: fix deletion of all macvlan filters
+* net/i40e: fix ethertype filter on X722
+* net/i40e: fix link update delay
+* net/i40e: fix logging for Tx free threshold check
+* net/i40e: fix segment number in reassemble process
+* net/i40e: fix wrong return value when handling PF message
+* net/i40e: fix xstats value mapping
+* net/i40evf: fix casting between structs
+* net/i40evf: fix reporting of imissed packets
+* net/ixgbe: fix blocked interrupts
+* net/ixgbe: fix received packets number for ARM
+* net/ixgbe: fix received packets number for ARM NEON
+* net/ixgbevf: fix max packet length
+* net/mlx5: fix RSS hash result for flows
+* net/mlx5: fix Rx packet validation and type
+* net/mlx5: fix Tx doorbell
+* net/mlx5: fix endianness in Tx completion queue
+* net/mlx5: fix inconsistent link status
+* net/mlx5: fix leak when starvation occurs
+* net/mlx5: fix link status query
+* net/mlx5: fix memory leak when parsing device params
+* net/mlx5: fix missing inline attributes
+* net/mlx5: fix updating total length of multi-packet send
+* net/mlx: fix IPv4 and IPv6 packet type
+* net/nfp: fix VLAN offload flags check
+* net/nfp: fix typo in Tx offload capabilities
+* net/pcap: fix timestamps in output pcap file
+* net/qede/base: fix FreeBSD build
+* net/qede: add vendor/device id info
+* net/qede: fix PF fastpath status block index
+* net/qede: fix filtering code
+* net/qede: fix function declaration
+* net/qede: fix per queue statistics
+* net/qede: fix resource leak
+* net/vhost: fix socket file deleted on stop
+* net/vhost: fix unix socket not removed as closing
+* net/virtio-user: fix not properly reset device
+* net/virtio-user: fix wrongly get/set features
+* net/virtio: fix build without virtio-user
+* net/virtio: fix crash when number of virtio devices > 1
+* net/virtio: fix multiple process support
+* net/virtio: fix performance regression due to TSO
+* net/virtio: fix rewriting LSC flag
+* net/virtio: fix wrong Rx/Tx method for secondary process
+* net/virtio: optimize header reset on any layout
+* net/virtio: store IO port info locally
+* net/virtio: store PCI operators pointer locally
+* net/vmxnet3: fix Rx deadlock
+* pci: fix check of mknod
+* pmdinfogen: fix endianness with cross-compilation
+* pmdinfogen: fix null dereference
+* sched: fix crash when freeing port
+* usertools: fix active interface detection when binding
+* vdev: fix detaching with alias
+* vfio: fix file descriptor leak in multi-process
+* vhost: allow many vhost-user ports
+* vhost: do not GSO when no header is present
+* vhost: fix dead loop in enqueue path
+* vhost: fix guest/host physical address mapping
+* vhost: fix long stall of negotiation
+* vhost: fix memory leak
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index dba5e15..af3d60f 100644
@@ -40,6 +40,7 @@
 #include <rte_vdev.h>
 #include <rte_malloc.h>
 #include <rte_cpuflags.h>
+#include <rte_byteorder.h>
 
 #include "aesni_gcm_pmd_private.h"
 
@@ -241,7 +242,8 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
         * to set BE LSB to 1, driver expects that 16B is allocated
         */
        if (op->cipher.iv.length == 12) {
-               op->cipher.iv.data[15] = 1;
+               uint32_t *iv_padd = (uint32_t *)&op->cipher.iv.data[12];
+               *iv_padd = rte_bswap32(1);
        }
 
        if (op->auth.aad.length != 12 && op->auth.aad.length != 8 &&
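
The fix matters because GCM's initial counter block for a 96-bit IV is J0 = IV || 0^31 || 1 (NIST SP 800-38D); writing only byte 15 silently assumed bytes 12-14 were already zero. A sketch of the full construction (rte_bswap32() swaps unconditionally, which is safe here only because this AESNI PMD is x86/little-endian):

    /* Build J0 for a 12-byte IV: the IV followed by a 32-bit counter
     * of 1 in big-endian order, explicitly clearing bytes 12-14. */
    static void
    gcm_build_j0(uint8_t j0[16], const uint8_t iv[12])
    {
            uint32_t ctr_be = rte_bswap32(1); /* bytes 00 00 00 01 */

            memcpy(j0, iv, 12);
            memcpy(&j0[12], &ctr_be, sizeof(ctr_be));
    }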
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index e824d4b..c51f82a 100644
@@ -77,8 +77,8 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
                                        .increment = 0
                                },
                                .iv_size = {
-                                       .min = 16,
-                                       .max = 16,
+                                       .min = 12,
+                                       .max = 12,
                                        .increment = 0
                                }
                        }, }
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index f07cd07..7443b47 100644
@@ -322,6 +322,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
                        rte_mempool_put(qp->sess_mp, _sess);
                        sess = NULL;
                }
+               op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
        }
 
        return sess;
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
index b119da2..c22128d 100644
@@ -137,7 +137,7 @@ kasumi_set_session_parameters(struct kasumi_session *sess,
                if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8)
                        return -EINVAL;
                /* Initialize key */
-               sso_kasumi_init_f8_key_sched(xform->cipher.key.data,
+               sso_kasumi_init_f8_key_sched(cipher_xform->cipher.key.data,
                                &sess->pKeySched_cipher);
        }
 
@@ -147,7 +147,7 @@ kasumi_set_session_parameters(struct kasumi_session *sess,
                        return -EINVAL;
                sess->auth_op = auth_xform->auth.op;
                /* Initialize key */
-               sso_kasumi_init_f9_key_sched(xform->auth.key.data,
+               sso_kasumi_init_f9_key_sched(auth_xform->auth.key.data,
                                &sess->pKeySched_hash);
        }
 
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
index 5f8fa33..832ea1d 100644
@@ -496,6 +496,8 @@ process_openssl_cipher_encrypt(uint8_t *src, uint8_t *dst,
        if (EVP_EncryptInit_ex(ctx, algo, NULL, key, iv) <= 0)
                goto process_cipher_encrypt_err;
 
+       EVP_CIPHER_CTX_set_padding(ctx, 0);
+
        if (EVP_EncryptUpdate(ctx, dst, &dstlen, src, srclen) <= 0)
                goto process_cipher_encrypt_err;
 
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 798cd98..a4119fc 100644
@@ -303,8 +303,8 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
                                        .increment = 8
                                },
                                .iv_size = {
-                                       .min = 16,
-                                       .max = 16,
+                                       .min = 12,
+                                       .max = 12,
                                        .increment = 0
                                }
                        }, }
@@ -956,7 +956,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
        uint32_t auth_len = 0, auth_ofs = 0;
        uint32_t min_ofs = 0;
        uint32_t digest_appended = 1;
-       uint64_t buf_start = 0;
+       uint64_t src_buf_start = 0, dst_buf_start = 0;
 
 
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
@@ -1085,27 +1085,40 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
        if (do_cipher && do_auth)
                min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
 
-
-       /* Start DMA at nearest aligned address below min_ofs */
-       #define QAT_64_BTYE_ALIGN_MASK (~0x3f)
-       buf_start = rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs) &
-                                                       QAT_64_BTYE_ALIGN_MASK;
-
-       if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src)
-                       - rte_pktmbuf_headroom(op->sym->m_src)) > buf_start)) {
-               /* alignment has pushed addr ahead of start of mbuf
-                * so revert and take the performance hit
+       if (unlikely(op->sym->m_dst != NULL)) {
+               /* Out-of-place operation (OOP)
+                * Don't align DMA start. DMA the minimum data-set
+                * so as not to overwrite data in dest buffer
+                */
+               src_buf_start =
+                       rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
+               dst_buf_start =
+                       rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
+       } else {
+               /* In-place operation
+                * Start DMA at nearest aligned address below min_ofs
                 */
-               buf_start = rte_pktmbuf_mtophys(op->sym->m_src);
+               src_buf_start =
+                       rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs)
+                                               & QAT_64_BTYE_ALIGN_MASK;
+
+               if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) -
+                                       rte_pktmbuf_headroom(op->sym->m_src))
+                                                       > src_buf_start)) {
+                       /* alignment has pushed addr ahead of start of mbuf
+                        * so revert and take the performance hit
+                        */
+                       src_buf_start =
+                               rte_pktmbuf_mtophys_offset(op->sym->m_src,
+                                                               min_ofs);
+               }
+               dst_buf_start = src_buf_start;
        }
 
-       qat_req->comn_mid.dest_data_addr =
-               qat_req->comn_mid.src_data_addr = buf_start;
-
        if (do_cipher) {
                cipher_param->cipher_offset =
-                                       (uint32_t)rte_pktmbuf_mtophys_offset(
-                                       op->sym->m_src, cipher_ofs) - buf_start;
+                               (uint32_t)rte_pktmbuf_mtophys_offset(
+                               op->sym->m_src, cipher_ofs) - src_buf_start;
                cipher_param->cipher_length = cipher_len;
        } else {
                cipher_param->cipher_offset = 0;
@@ -1113,7 +1126,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
        }
        if (do_auth) {
                auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
-                                       op->sym->m_src, auth_ofs) - buf_start;
+                               op->sym->m_src, auth_ofs) - src_buf_start;
                auth_param->auth_len = auth_len;
        } else {
                auth_param->auth_off = 0;
@@ -1134,21 +1147,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
                        qat_req->comn_mid.src_length
                                += op->sym->auth.digest.length;
        }
-
-       /* out-of-place operation (OOP) */
-       if (unlikely(op->sym->m_dst != NULL)) {
-
-               if (do_auth)
-                       qat_req->comn_mid.dest_data_addr =
-                               rte_pktmbuf_mtophys_offset(op->sym->m_dst,
-                                               auth_ofs)
-                                               - auth_param->auth_off;
-               else
-                       qat_req->comn_mid.dest_data_addr =
-                               rte_pktmbuf_mtophys_offset(op->sym->m_dst,
-                                               cipher_ofs)
-                                               - cipher_param->cipher_offset;
-       }
+       qat_req->comn_mid.src_data_addr = src_buf_start;
+       qat_req->comn_mid.dest_data_addr = dst_buf_start;
 
        if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
                        ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
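
For the in-place path the driver keeps the old optimization: start the DMA at the 64-byte boundary below min_ofs, unless that would land before the mbuf's buffer. A condensed sketch of that branch, using the helpers and the (deliberately kept, misspelled) mask macro from the hunks:

    static uint64_t
    qat_in_place_dma_start(struct rte_mbuf *m, uint32_t min_ofs)
    {
            uint64_t start = rte_pktmbuf_mtophys_offset(m, min_ofs)
                                    & QAT_64_BTYE_ALIGN_MASK;

            /* Alignment stepped in front of the mbuf: revert to the
             * exact offset and take the performance hit. */
            if ((rte_pktmbuf_mtophys(m) - rte_pktmbuf_headroom(m)) > start)
                    start = rte_pktmbuf_mtophys_offset(m, min_ofs);

            return start;
    }

Out-of-place requests skip the alignment entirely and DMA only the minimal data set, so bytes the operation does not produce are never written into the destination buffer.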
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 0afe74e..6b84488 100644
@@ -43,6 +43,7 @@
  */
 #define ALIGN_POW2_ROUNDUP(num, align) \
        (((num) + (align) - 1) & ~((align) - 1))
+#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
 
 /**
  * Structure associated with each queue.
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index 3b4292a..0081fec 100644
@@ -137,7 +137,7 @@ snow3g_set_session_parameters(struct snow3g_session *sess,
                if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
                        return -EINVAL;
                /* Initialize key */
-               sso_snow3g_init_key_sched(xform->cipher.key.data,
+               sso_snow3g_init_key_sched(cipher_xform->cipher.key.data,
                                &sess->pKeySched_cipher);
        }
 
@@ -147,7 +147,7 @@ snow3g_set_session_parameters(struct snow3g_session *sess,
                        return -EINVAL;
                sess->auth_op = auth_xform->auth.op;
                /* Initialize key */
-               sso_snow3g_init_key_sched(xform->auth.key.data,
+               sso_snow3g_init_key_sched(auth_xform->auth.key.data,
                                &sess->pKeySched_hash);
        }
 
diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
index 3849119..7057fca 100644
@@ -136,7 +136,8 @@ zuc_set_session_parameters(struct zuc_session *sess,
                if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3)
                        return -EINVAL;
                /* Copy the key */
-               memcpy(sess->pKey_cipher, xform->cipher.key.data, ZUC_IV_KEY_LENGTH);
+               memcpy(sess->pKey_cipher, cipher_xform->cipher.key.data,
+                               ZUC_IV_KEY_LENGTH);
        }
 
        if (auth_xform) {
@@ -145,7 +146,8 @@ zuc_set_session_parameters(struct zuc_session *sess,
                        return -EINVAL;
                sess->auth_op = auth_xform->auth.op;
                /* Copy the key */
-               memcpy(sess->pKey_hash, xform->auth.key.data, ZUC_IV_KEY_LENGTH);
+               memcpy(sess->pKey_hash, auth_xform->auth.key.data,
+                               ZUC_IV_KEY_LENGTH);
        }
 
 
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index ff45068..45c6519 100644
@@ -261,9 +261,16 @@ eth_dev_stop(struct rte_eth_dev *dev)
                sockfd = internals->rx_queue[i].sockfd;
                if (sockfd != -1)
                        close(sockfd);
-               sockfd = internals->tx_queue[i].sockfd;
-               if (sockfd != -1)
-                       close(sockfd);
+
+               /* Prevent use after free in case tx fd == rx fd */
+               if (sockfd != internals->tx_queue[i].sockfd) {
+                       sockfd = internals->tx_queue[i].sockfd;
+                       if (sockfd != -1)
+                               close(sockfd);
+               }
+
+               internals->rx_queue[i].sockfd = -1;
+               internals->tx_queue[i].sockfd = -1;
        }
 
        dev->data->dev_link.link_status = ETH_LINK_DOWN;
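
The extra comparison exists because a queue pair may share one AF_PACKET socket; closing the same descriptor twice can close an unrelated file that reused the number in the meantime. A minimal sketch of the safe teardown (the helper name is illustrative):

    static void
    close_queue_pair(int *rx_fd, int *tx_fd)
    {
            if (*rx_fd != -1)
                    close(*rx_fd);
            /* Skip the tx fd when it aliases the rx fd just closed. */
            if (*tx_fd != -1 && *tx_fd != *rx_fd)
                    close(*tx_fd);
            /* Mark both slots dead so a repeated stop is a no-op. */
            *rx_fd = -1;
            *tx_fd = -1;
    }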
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 2856630..0d16a73 100644
@@ -1438,6 +1438,7 @@ bnx2x_fill_accept_flags(struct bnx2x_softc *sc, uint32_t rx_mode,
 
                break;
 
+       case BNX2X_RX_MODE_ALLMULTI_PROMISC:
        case BNX2X_RX_MODE_PROMISC:
                /*
                 * According to definition of SI mode, iface in promisc mode
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 5cefea4..5709305 100644
@@ -1146,11 +1146,12 @@ struct bnx2x_softc {
 #define BNX2X_RECOVERY_NIC_LOADING 5
 
        uint32_t rx_mode;
-#define BNX2X_RX_MODE_NONE     0
-#define BNX2X_RX_MODE_NORMAL   1
-#define BNX2X_RX_MODE_ALLMULTI 2
-#define BNX2X_RX_MODE_PROMISC  3
-#define BNX2X_MAX_MULTICAST    64
+#define BNX2X_RX_MODE_NONE             0
+#define BNX2X_RX_MODE_NORMAL           1
+#define BNX2X_RX_MODE_ALLMULTI         2
+#define BNX2X_RX_MODE_ALLMULTI_PROMISC 3
+#define BNX2X_RX_MODE_PROMISC          4
+#define BNX2X_MAX_MULTICAST            64
 
        struct bnx2x_port port;
 
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 0eae433..a8aebbe 100644
@@ -256,6 +256,8 @@ bnx2x_promisc_enable(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
        sc->rx_mode = BNX2X_RX_MODE_PROMISC;
+       if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
+               sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
        bnx2x_set_rx_mode(sc);
 }
 
@@ -266,6 +268,8 @@ bnx2x_promisc_disable(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
        sc->rx_mode = BNX2X_RX_MODE_NORMAL;
+       if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
+               sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
        bnx2x_set_rx_mode(sc);
 }
 
@@ -276,6 +280,8 @@ bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
        sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
+       if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
+               sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
        bnx2x_set_rx_mode(sc);
 }
 
@@ -286,6 +292,8 @@ bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
        sc->rx_mode = BNX2X_RX_MODE_NORMAL;
+       if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
+               sc->rx_mode = BNX2X_RX_MODE_PROMISC;
        bnx2x_set_rx_mode(sc);
 }
 
@@ -422,6 +430,7 @@ bnx2x_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                        xstats[num].value =
                                          *(uint64_t *)((char *)&sc->eth_stats +
                                          bnx2x_xstats_strings[num].offset_lo);
+               xstats[num].id = num;
        }
 
        return num;
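
Taken together, the rx-mode hunks make the promiscuous and allmulticast toggles commute instead of the last write winning. A hypothetical helper showing the resulting mapping (the driver open-codes this in each callback):

    static uint32_t
    bnx2x_rx_mode_from_toggles(int promisc, int allmulti)
    {
            if (promisc && allmulti)
                    return BNX2X_RX_MODE_ALLMULTI_PROMISC;
            if (promisc)
                    return BNX2X_RX_MODE_PROMISC;
            if (allmulti)
                    return BNX2X_RX_MODE_ALLMULTI;
            return BNX2X_RX_MODE_NORMAL;
    }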
diff --git a/drivers/net/bnx2x/bnx2x_vfpf.c b/drivers/net/bnx2x/bnx2x_vfpf.c
index c47beb0..0ca0df8 100644
@@ -648,6 +648,7 @@ bnx2x_vf_set_rx_mode(struct bnx2x_softc *sc)
                query->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
                query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
                break;
+       case BNX2X_RX_MODE_ALLMULTI_PROMISC:
        case BNX2X_RX_MODE_PROMISC:
                query->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
                query->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 7e79adf..c089b06 100644
@@ -1532,7 +1532,7 @@ int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
 {
        unsigned int base = adapter->params.pci.vpd_cap_addr;
        int ret;
-       u32 stats_reg;
+       u32 stats_reg = 0;
        int max_poll;
 
        /* VPD Accesses must always be 4-byte aligned!
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 922155b..345f9b0 100644
@@ -959,7 +959,7 @@ int setup_rss(struct port_info *pi)
        dev_debug(adapter, "%s:  pi->rss_size = %u; pi->n_rx_qsets = %u\n",
                  __func__, pi->rss_size, pi->n_rx_qsets);
 
-       if (!pi->flags & PORT_RSS_DONE) {
+       if (!(pi->flags & PORT_RSS_DONE)) {
                if (adapter->flags & FULL_INIT_DONE) {
                        /* Fill default values with equal distribution */
                        for (j = 0; j < pi->rss_size; j++)
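
The added parentheses fix a classic precedence bug: logical ! binds tighter than bitwise &, so the old test evaluated as (!pi->flags) & PORT_RSS_DONE, which is 0 as soon as any flag at all is set. A self-contained demonstration with an illustrative bit value:

    #include <assert.h>

    #define PORT_RSS_DONE (1u << 3) /* illustrative, not cxgbe's value */

    static void
    precedence_demo(void)
    {
            unsigned int flags = 1u << 0; /* another flag set, RSS not done */

            assert((!flags & PORT_RSS_DONE) == 0); /* buggy form: never true */
            assert(!(flags & PORT_RSS_DONE));      /* fixed form: true */
    }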
diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c
index 88053e3..bd6f3c6 100644
@@ -2590,19 +2590,11 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
+       int ret;
 
-       int ret = 0;
-
-       if (unlikely(!ena_dev)) {
-               ena_trc_err("%s : ena_dev is NULL\n", __func__);
-               return ENA_COM_NO_DEVICE;
-       }
-
-       if (!ena_com_check_supported_feature_id(ena_dev,
-                                               ENA_ADMIN_HOST_ATTR_CONFIG)) {
-               ena_trc_warn("Set host attribute isn't supported\n");
-               return ENA_COM_PERMISSION;
-       }
+       /* Host attribute config is called before ena_com_get_dev_attr_feat
+        * so ena_com can't check if the feature is supported.
+        */
 
        memset(&cmd, 0x0, sizeof(cmd));
        admin_queue = &ena_dev->admin_queue;
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index ab9a178..c1fd7bb 100644
@@ -357,12 +357,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
 
        rc = ena_com_set_host_attributes(ena_dev);
        if (rc) {
-               if (rc == -EPERM)
-                       RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
-               else
-                       RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
-
-               goto err;
+               RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+               if (rc != -EPERM)
+                       goto err;
        }
 
        return;
@@ -413,11 +410,9 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
 
        rc = ena_com_set_host_attributes(&adapter->ena_dev);
        if (rc) {
-               if (rc == -EPERM)
-                       RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
-               else
-                       RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
-               goto err;
+               RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+               if (rc != -EPERM)
+                       goto err;
        }
 
        return;
@@ -1228,14 +1223,14 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
                goto err_mmio_read_less;
        }
 
-       ena_config_host_info(ena_dev);
-
        /* To enable the msix interrupts the driver needs to know the number
         * of queues. So the driver uses polling mode to retrieve this
         * information.
         */
        ena_com_set_admin_polling_mode(ena_dev, true);
 
+       ena_config_host_info(ena_dev);
+
        /* Get Device Attributes and features */
        rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
        if (rc) {
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 865cd76..a3d2a0f 100644
@@ -102,6 +102,7 @@ struct enic_fdir {
 struct enic_soft_stats {
        rte_atomic64_t rx_nombuf;
        rte_atomic64_t rx_packet_errors;
+       rte_atomic64_t tx_oversized;
 };
 
 struct enic_memzone_entry {
@@ -301,8 +302,7 @@ int enic_link_update(struct enic *enic);
 void enic_fdir_info(struct enic *enic);
 void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats);
 void copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
-                 struct rte_eth_fdir_masks *masks);
-void copy_fltr_v2(__rte_unused struct filter_v2 *fltr,
-                 __rte_unused struct rte_eth_fdir_input *input,
                  __rte_unused struct rte_eth_fdir_masks *masks);
+void copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+                 struct rte_eth_fdir_masks *masks);
 #endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c
index bcf479a..487f804 100644
@@ -211,15 +211,15 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
                memset(&ip4_val, 0, sizeof(struct ipv4_hdr));
 
                if (input->flow.ip4_flow.tos) {
-                       ip4_mask.type_of_service = 0xff;
+                       ip4_mask.type_of_service = masks->ipv4_mask.tos;
                        ip4_val.type_of_service = input->flow.ip4_flow.tos;
                }
                if (input->flow.ip4_flow.ttl) {
-                       ip4_mask.time_to_live = 0xff;
+                       ip4_mask.time_to_live = masks->ipv4_mask.ttl;
                        ip4_val.time_to_live = input->flow.ip4_flow.ttl;
                }
                if (input->flow.ip4_flow.proto) {
-                       ip4_mask.next_proto_id = 0xff;
+                       ip4_mask.next_proto_id = masks->ipv4_mask.proto;
                        ip4_val.next_proto_id = input->flow.ip4_flow.proto;
                }
                if (input->flow.ip4_flow.src_ip) {
@@ -299,7 +299,7 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
                memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));
 
                if (input->flow.ipv6_flow.proto) {
-                       ipv6_mask.proto = 0xff;
+                       ipv6_mask.proto = masks->ipv6_mask.proto;
                        ipv6_val.proto = input->flow.ipv6_flow.proto;
                }
                for (i = 0; i < 4; i++) {
@@ -315,11 +315,11 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
                                        input->flow.ipv6_flow.dst_ip[i];
                }
                if (input->flow.ipv6_flow.tc) {
-                       ipv6_mask.vtc_flow = 0x00ff0000;
-                       ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 16;
+                       ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12;
+                       ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
                }
                if (input->flow.ipv6_flow.hop_limits) {
-                       ipv6_mask.hop_limits = 0xff;
+                       ipv6_mask.hop_limits = masks->ipv6_mask.hop_limits;
                        ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits;
                }
 
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index f0b15ac..1861a32 100644
@@ -137,6 +137,7 @@ static void enic_clear_soft_stats(struct enic *enic)
        struct enic_soft_stats *soft_stats = &enic->soft_stats;
        rte_atomic64_clear(&soft_stats->rx_nombuf);
        rte_atomic64_clear(&soft_stats->rx_packet_errors);
+       rte_atomic64_clear(&soft_stats->tx_oversized);
 }
 
 static void enic_init_soft_stats(struct enic *enic)
@@ -144,6 +145,7 @@ static void enic_init_soft_stats(struct enic *enic)
        struct enic_soft_stats *soft_stats = &enic->soft_stats;
        rte_atomic64_init(&soft_stats->rx_nombuf);
        rte_atomic64_init(&soft_stats->rx_packet_errors);
+       rte_atomic64_init(&soft_stats->tx_oversized);
        enic_clear_soft_stats(enic);
 }
 
@@ -183,7 +185,8 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
        r_stats->obytes = stats->tx.tx_bytes_ok;
 
        r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
-       r_stats->oerrors = stats->tx.tx_errors;
+       r_stats->oerrors = stats->tx.tx_errors
+                          + rte_atomic64_read(&soft_stats->tx_oversized);
 
        r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;
 
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 8a230a1..867bd25 100644
@@ -89,10 +89,11 @@ int enic_get_vnic_config(struct enic *enic)
        /* max packet size is only defined in newer VIC firmware
         * and will be 0 for legacy firmware and VICs
         */
-       if (c->max_pkt_size > ENIC_DEFAULT_MAX_PKT_SIZE)
+       if (c->max_pkt_size > ENIC_DEFAULT_RX_MAX_PKT_SIZE)
                enic->max_mtu = c->max_pkt_size - (ETHER_HDR_LEN + 4);
        else
-               enic->max_mtu = ENIC_DEFAULT_MAX_PKT_SIZE - (ETHER_HDR_LEN + 4);
+               enic->max_mtu = ENIC_DEFAULT_RX_MAX_PKT_SIZE
+                               - (ETHER_HDR_LEN + 4);
        if (c->mtu == 0)
                c->mtu = 1500;
 
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 303530e..1135d2e 100644
 #define ENIC_MIN_MTU                   68
 
 /* Does not include (possible) inserted VLAN tag and FCS */
-#define ENIC_DEFAULT_MAX_PKT_SIZE      9022
+#define ENIC_DEFAULT_RX_MAX_PKT_SIZE   9022
+
+/* Does not include (possible) inserted VLAN tag and FCS */
+#define ENIC_TX_MAX_PKT_SIZE           9208
 
 #define ENIC_MULTICAST_PERFECT_FILTERS 32
 #define ENIC_UNICAST_PERFECT_FILTERS   32
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index f762a26..912ea15 100644
@@ -477,16 +477,23 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
        for (index = 0; index < nb_pkts; index++) {
                tx_pkt = *tx_pkts++;
+               pkt_len = tx_pkt->pkt_len;
+               data_len = tx_pkt->data_len;
+               ol_flags = tx_pkt->ol_flags;
                nb_segs = tx_pkt->nb_segs;
+
+               if (pkt_len > ENIC_TX_MAX_PKT_SIZE) {
+                       rte_pktmbuf_free(tx_pkt);
+                       rte_atomic64_inc(&enic->soft_stats.tx_oversized);
+                       continue;
+               }
+
                if (nb_segs > wq_desc_avail) {
                        if (index > 0)
                                goto post;
                        goto done;
                }
 
-               pkt_len = tx_pkt->pkt_len;
-               data_len = tx_pkt->data_len;
-               ol_flags = tx_pkt->ol_flags;
                mss = 0;
                vlan_id = 0;
                vlan_tag_insert = 0;
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 923690c..7c51d3b 100644
@@ -1315,6 +1315,7 @@ fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
        for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
                xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                        fm10k_hw_stats_strings[count].offset);
+               xstats[count].id = count;
                count++;
        }
 
@@ -1324,12 +1325,14 @@ fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                        xstats[count].value =
                                *(uint64_t *)(((char *)&hw_stats->q[q]) +
                                fm10k_hw_stats_rx_q_strings[i].offset);
+                       xstats[count].id = count;
                        count++;
                }
                for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
                        xstats[count].value =
                                *(uint64_t *)(((char *)&hw_stats->q[q]) +
                                fm10k_hw_stats_tx_q_strings[i].offset);
+                       xstats[count].id = count;
                        count++;
                }
        }
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 67778ba..bf7e5a0 100644
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -1628,6 +1628,8 @@ i40e_phy_conf_link(struct i40e_hw *hw,
 
        /* use get_phy_abilities_resp value for the rest */
        phy_conf.phy_type = phy_ab.phy_type;
+       phy_conf.phy_type_ext = phy_ab.phy_type_ext;
+       phy_conf.fec_config = phy_ab.mod_type_ext;
        phy_conf.eee_capability = phy_ab.eee_capability;
        phy_conf.eeer = phy_ab.eeer_val;
        phy_conf.low_power_ctrl = phy_ab.d3_lpan;
@@ -1653,8 +1655,7 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
        struct rte_eth_conf *conf = &dev->data->dev_conf;
 
        speed = i40e_parse_link_speeds(conf->link_speeds);
-       if (!I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
-               abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+       abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
        if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
                abilities |= I40E_AQ_PHY_AN_ENABLED;
        abilities |= I40E_AQ_PHY_LINK_ENABLED;
@@ -1875,18 +1876,17 @@ i40e_dev_close(struct rte_eth_dev *dev)
        /* shutdown and destroy the HMC */
        i40e_shutdown_lan_hmc(hw);
 
-       /* release all the existing VSIs and VEBs */
-       i40e_fdir_teardown(pf);
-       i40e_vsi_release(pf->main_vsi);
-
        for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
                i40e_vsi_release(pf->vmdq[i].vsi);
                pf->vmdq[i].vsi = NULL;
        }
-
        rte_free(pf->vmdq);
        pf->vmdq = NULL;
 
+       /* release all the existing VSIs and VEBs */
+       i40e_fdir_teardown(pf);
+       i40e_vsi_release(pf->main_vsi);
+
        /* shutdown the adminq */
        i40e_aq_queue_shutdown(hw, true);
        i40e_shutdown_adminq(hw);
@@ -1990,8 +1990,7 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
        uint8_t abilities = 0;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       if (!I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
-               abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+       abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
        return i40e_phy_conf_link(hw, abilities, speed);
 }
 
@@ -2025,11 +2024,11 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
                }
 
                link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
-               if (!wait_to_complete)
+               if (!wait_to_complete || link.link_status)
                        break;
 
                rte_delay_ms(CHECK_INTERVAL);
-       } while (!link.link_status && rep_cnt--);
+       } while (--rep_cnt);
 
        if (!link.link_status)
                goto out;
@@ -2532,6 +2531,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
        for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
                xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
                        rte_i40e_stats_strings[i].offset);
+               xstats[count].id = count;
                count++;
        }
 
@@ -2539,6 +2539,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
        for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
                xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                        rte_i40e_hw_port_strings[i].offset);
+               xstats[count].id = count;
                count++;
        }
 
@@ -2548,6 +2549,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                                *(uint64_t *)(((char *)hw_stats) +
                                rte_i40e_rxq_prio_strings[i].offset +
                                (sizeof(uint64_t) * prio));
+                       xstats[count].id = count;
                        count++;
                }
        }
@@ -2558,6 +2560,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                                *(uint64_t *)(((char *)hw_stats) +
                                rte_i40e_txq_prio_strings[i].offset +
                                (sizeof(uint64_t) * prio));
+                       xstats[count].id = count;
                        count++;
                }
        }
@@ -4136,6 +4139,9 @@ i40e_vsi_release(struct i40e_vsi *vsi)
        if (!vsi)
                return I40E_SUCCESS;
 
+       if (!vsi->adapter)
+               return -EFAULT;
+
        user_param = vsi->user_param;
 
        pf = I40E_VSI_TO_PF(vsi);
@@ -5844,7 +5850,7 @@ i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
 static int
 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
 {
-       int i, num;
+       int i, j, num;
        struct i40e_mac_filter *f;
        struct i40e_macvlan_filter *mv_f;
        int ret = I40E_SUCCESS;
@@ -5869,6 +5875,7 @@ i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
                TAILQ_FOREACH(f, &vsi->mac_list, next) {
                        (void)rte_memcpy(&mv_f[i].macaddr,
                                &f->mac_info.mac_addr, ETH_ADDR_LEN);
+                       mv_f[i].filter_type = f->mac_info.filter_type;
                        mv_f[i].vlan_id = 0;
                        i++;
                }
@@ -5878,6 +5885,8 @@ i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
                                        vsi->vlan_num, &f->mac_info.mac_addr);
                        if (ret != I40E_SUCCESS)
                                goto DONE;
+                       for (j = i; j < i + vsi->vlan_num; j++)
+                               mv_f[j].filter_type = f->mac_info.filter_type;
                        i += vsi->vlan_num;
                }
        }
@@ -8275,6 +8284,10 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
 
+/* For X722 */
+#define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
+#define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
+
 /* For X710 */
 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
 /* For XL710 */
@@ -8297,7 +8310,6 @@ i40e_dev_sync_phy_type(struct i40e_hw *hw)
        return 0;
 }
 
-
 static void
 i40e_configure_registers(struct i40e_hw *hw)
 {
@@ -8305,8 +8317,8 @@ i40e_configure_registers(struct i40e_hw *hw)
                uint32_t addr;
                uint64_t val;
        } reg_table[] = {
-               {I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE},
-               {I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE},
+               {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
+               {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
                {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
        };
        uint64_t reg;
@@ -8314,6 +8326,24 @@ i40e_configure_registers(struct i40e_hw *hw)
        int ret;
 
        for (i = 0; i < RTE_DIM(reg_table); i++) {
+               if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
+                       if (hw->mac.type == I40E_MAC_X722) /* For X722 */
+                               reg_table[i].val =
+                                       I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
+                       else /* For X710/XL710/XXV710 */
+                               reg_table[i].val =
+                                       I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE;
+               }
+
+               if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
+                       if (hw->mac.type == I40E_MAC_X722) /* For X722 */
+                               reg_table[i].val =
+                                       I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
+                       else /* For X710/XL710/XXV710 */
+                               reg_table[i].val =
+                                       I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
+               }
+
                if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
                        if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
                            I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 298cef4..5f3ecd9 100644
@@ -227,7 +227,7 @@ struct i40e_bw_info {
        /* Relative credits within same TC with respect to other VSIs or Comps */
        uint8_t  bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS];
        /* Bandwidth limit per TC */
-       uint8_t  bw_ets_credits[I40E_MAX_TRAFFIC_CLASS];
+       uint16_t bw_ets_credits[I40E_MAX_TRAFFIC_CLASS];
        /* Max bandwidth limit per TC */
        uint8_t  bw_ets_max[I40E_MAX_TRAFFIC_CLASS];
 };
@@ -527,7 +527,7 @@ struct i40e_vf {
        enum i40e_aq_link_speed link_speed;
        bool vf_reset;
        volatile uint32_t pend_cmd; /* pending command not finished yet */
-       uint32_t cmd_retval; /* return value of the cmd response from PF */
+       int32_t cmd_retval; /* return value of the cmd response from PF */
        u16 pend_msg; /* flags indicates events from pf not handled yet */
        uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index aa306d6..640d316 100644
@@ -176,11 +176,11 @@ static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = {
        {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
                rx_unknown_protocol)},
        {"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)},
-       {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
-       {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
-       {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
-       {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
-       {"tx_error_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
+       {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
+       {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
+       {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
+       {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
+       {"tx_error_packets", offsetof(struct i40e_eth_stats, tx_errors)},
 };
 
 #define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \
@@ -361,6 +361,7 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
                err = -1;
                do {
                        ret = i40evf_read_pfmsg(dev, &info);
+                       vf->cmd_retval = info.result;
                        if (ret == I40EVF_MSG_CMD) {
                                err = 0;
                                break;
@@ -965,7 +966,7 @@ i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
                                                pstats->rx_broadcast;
        stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
                                                pstats->tx_unicast;
-       stats->ierrors = pstats->rx_discards;
+       stats->imissed = pstats->rx_discards;
        stats->oerrors = pstats->tx_errors + pstats->tx_discards;
        stats->ibytes = pstats->rx_bytes;
        stats->obytes = pstats->tx_bytes;
@@ -1336,8 +1337,9 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
        struct i40e_arq_event_info info;
-       struct i40e_virtchnl_msg *v_msg;
-       uint16_t pending, opcode;
+       uint16_t pending, aq_opc;
+       enum i40e_virtchnl_ops msg_opc;
+       enum i40e_status_code msg_ret;
        int ret;
 
        info.buf_len = I40E_AQ_BUF_SZ;
@@ -1346,7 +1348,6 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
                return;
        }
        info.msg_buf = vf->aq_resp;
-       v_msg = (struct i40e_virtchnl_msg *)&info.desc;
 
        pending = 1;
        while (pending) {
@@ -1357,32 +1358,39 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
                                    "ret: %d", ret);
                        break;
                }
-               opcode = rte_le_to_cpu_16(info.desc.opcode);
-
-               switch (opcode) {
+               aq_opc = rte_le_to_cpu_16(info.desc.opcode);
+               /* For a message sent from PF to VF, the opcode is stored in
+                * cookie_high of struct i40e_aq_desc, while the return error
+                * code is stored in cookie_low, as done by
+                * i40e_aq_send_msg_to_vf in the PF driver. */
+               msg_opc = (enum i40e_virtchnl_ops)rte_le_to_cpu_32(
+                                                 info.desc.cookie_high);
+               msg_ret = (enum i40e_status_code)rte_le_to_cpu_32(
+                                                 info.desc.cookie_low);
+               switch (aq_opc) {
                case i40e_aqc_opc_send_msg_to_vf:
-                       if (v_msg->v_opcode == I40E_VIRTCHNL_OP_EVENT)
+                       if (msg_opc == I40E_VIRTCHNL_OP_EVENT)
                                /* process event*/
                                i40evf_handle_pf_event(dev, info.msg_buf,
                                                       info.msg_len);
                        else {
                                /* read message and it's expected one */
-                               if (v_msg->v_opcode == vf->pend_cmd) {
-                                       vf->cmd_retval = v_msg->v_retval;
+                               if (msg_opc == vf->pend_cmd) {
+                                       vf->cmd_retval = msg_ret;
                                        /* prevent compiler reordering */
                                        rte_compiler_barrier();
                                        _clear_cmd(vf);
                                } else
                                        PMD_DRV_LOG(ERR, "command mismatch,"
                                                "expect %u, get %u",
-                                               vf->pend_cmd, v_msg->v_opcode);
+                                               vf->pend_cmd, msg_opc);
                                PMD_DRV_LOG(DEBUG, "adminq response is received,"
-                                            " opcode = %d\n", v_msg->v_opcode);
+                                            " opcode = %d\n", msg_opc);
                        }
                        break;
                default:
                        PMD_DRV_LOG(ERR, "Request %u is not supported yet",
-                                   opcode);
+                                   aq_opc);
                        break;
                }
        }
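
The hunk above stops casting the descriptor to struct i40e_virtchnl_msg and reads the virtchnl opcode and return code straight from the descriptor cookies instead. A minimal sketch of that decode step, assuming a simplified descriptor and a little-endian host; the real driver uses rte_le_to_cpu_32() and the full struct i40e_aq_desc:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for struct i40e_aq_desc: the PF stores the
     * virtchnl opcode in cookie_high and the status in cookie_low. */
    struct aq_desc {
            uint32_t cookie_high;   /* little-endian on the wire */
            uint32_t cookie_low;
    };

    /* Identity on a little-endian host; rte_le_to_cpu_32() in DPDK. */
    static uint32_t le32_to_cpu(uint32_t v) { return v; }

    int main(void)
    {
            struct aq_desc desc = { .cookie_high = 14, /* arbitrary opcode */
                                    .cookie_low = 0 }; /* success */
            uint32_t msg_opc = le32_to_cpu(desc.cookie_high);
            int32_t msg_ret = (int32_t)le32_to_cpu(desc.cookie_low);

            printf("opcode=%u retval=%d\n", msg_opc, msg_ret);
            return 0;
    }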
index ddfc140..97b8ecc 100644
@@ -138,7 +138,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
        abs_vf_id = vf_id + hw->func_caps.vf_base_id;
 
        /* Notify VF that we are in VFR progress */
-       I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_INPROGRESS);
+       I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_INPROGRESS);
 
        /*
         * If require a SW VF reset, a VFLR interrupt will be generated,
@@ -219,7 +219,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
        }
 
        /* Reset done, Set COMPLETE flag and clear reset bit */
-       I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_COMPLETED);
+       I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_COMPLETED);
        val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
        val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
        I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
@@ -247,6 +247,8 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
                return -EFAULT;
        }
 
+       I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
+
        return ret;
 }
 
index cddc45c..244bac3 100644
 
 #define I40E_DPDK_OFFSET  0x100
 
-enum i40e_pf_vfr_state {
-       I40E_PF_VFR_INPROGRESS = 0,
-       I40E_PF_VFR_COMPLETED = 1,
-};
-
 /* DPDK pf driver specific command to VF */
 enum i40e_virtchnl_ops_dpdk {
        /*
index 7ae7d9f..1b25b2f 100644
@@ -138,8 +138,11 @@ i40e_rxd_error_to_pkt_flags(uint64_t qword)
        uint64_t error_bits = (qword >> I40E_RXD_QW1_ERROR_SHIFT);
 
 #define I40E_RX_ERR_BITS 0x3f
-       if (likely((error_bits & I40E_RX_ERR_BITS) == 0))
+       if (likely((error_bits & I40E_RX_ERR_BITS) == 0)) {
+               flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
                return flags;
+       }
+
        if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)))
                flags |= PKT_RX_IP_CKSUM_BAD;
        else
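
With this hunk the RX path reports good checksums explicitly instead of staying silent when no error bit is set. A scalar sketch of the resulting decision tree; the flag values are taken to match the 16.11 rte_mbuf.h but should be treated as illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define PKT_RX_L4_CKSUM_BAD   (1ULL << 3)  /* illustrative values */
    #define PKT_RX_IP_CKSUM_BAD   (1ULL << 4)
    #define PKT_RX_IP_CKSUM_GOOD  (1ULL << 7)
    #define PKT_RX_L4_CKSUM_GOOD  (1ULL << 8)

    /* err_ip/err_l4 stand in for the IPE/L4E descriptor error bits. */
    static uint64_t rx_cksum_flags(int err_ip, int err_l4)
    {
            if (!err_ip && !err_l4) /* fast path: no error bit at all */
                    return PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD;
            return (err_ip ? PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD) |
                   (err_l4 ? PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD);
    }

    int main(void)
    {
            printf("0x%llx\n", (unsigned long long)rx_cksum_flags(0, 1));
            return 0;
    }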
@@ -1916,8 +1919,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
                return I40E_ERR_PARAM;
        }
        if (tx_free_thresh >= (nb_desc - 3)) {
-               PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
-                            "tx_free_thresh must be less than the "
+               PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
                             "number of TX descriptors minus 3. "
                             "(tx_free_thresh=%u port=%d queue=%d)",
                             (unsigned int)tx_free_thresh,
index 6cb5dce..990520f 100644
@@ -71,6 +71,7 @@ reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
                                        /* free up last mbuf */
                                        struct rte_mbuf *secondlast = start;
 
+                                       start->nb_segs--;
                                        while (secondlast->next != end)
                                                secondlast = secondlast->next;
                                        secondlast->data_len -= (rxq->crc_len -
index 7c84a41..b95cc8e 100644
@@ -148,6 +148,20 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
        const __m128i rss_vlan_msk = _mm_set_epi32(
                        0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);
 
+       const __m128i cksum_mask = _mm_set_epi32(
+                       PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+                       PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+                       PKT_RX_EIP_CKSUM_BAD,
+                       PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+                       PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+                       PKT_RX_EIP_CKSUM_BAD,
+                       PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+                       PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+                       PKT_RX_EIP_CKSUM_BAD,
+                       PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+                       PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+                       PKT_RX_EIP_CKSUM_BAD);
+
        /* map rss and vlan type to rss hash and vlan flag */
        const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
                        0, 0, 0, 0,
@@ -160,14 +174,17 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
                        0, 0, PKT_RX_FDIR, 0);
 
        const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
-                       PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
-                       PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
-                       PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
-                       PKT_RX_EIP_CKSUM_BAD,
-                       PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
-                       PKT_RX_L4_CKSUM_BAD,
-                       PKT_RX_IP_CKSUM_BAD,
-                       0);
+                       /* shift right 1 bit to make sure it does not exceed 255 */
+                       (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+                        PKT_RX_IP_CKSUM_BAD) >> 1,
+                       (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
+                        PKT_RX_L4_CKSUM_BAD) >> 1,
+                       (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+                       (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+                       (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+                       (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
+                       PKT_RX_IP_CKSUM_BAD >> 1,
+                       (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
 
        vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
        vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
@@ -181,6 +198,10 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
 
        l3_l4e = _mm_srli_epi32(vlan1, 22);
        l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
+       /* then we shift left 1 bit to restore the original values */
+       l3_l4e = _mm_slli_epi32(l3_l4e, 1);
+       /* we need to mask out the redundant bits */
+       l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);
 
        vlan0 = _mm_or_si128(vlan0, rss);
        vlan0 = _mm_or_si128(vlan0, l3_l4e);
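
_mm_shuffle_epi8() can only produce 8-bit table entries, and PKT_RX_L4_CKSUM_GOOD is bit 8, which no longer fits in a byte. The table is therefore stored pre-shifted right by one, the shuffled result is shifted back left, and cksum_mask strips anything the shift dragged in. A scalar model of that round trip, with illustrative flag positions:

    #include <assert.h>
    #include <stdint.h>

    #define IP_GOOD (1u << 7)
    #define L4_GOOD (1u << 8)               /* does not fit in a byte */
    #define CKSUM_MASK (IP_GOOD | L4_GOOD)

    int main(void)
    {
            /* Table entry stored >>1 so it fits in 8 bits... */
            uint8_t entry = (IP_GOOD | L4_GOOD) >> 1;

            /* ...then the lookup result is shifted back and masked,
             * like the _mm_slli_epi32()/_mm_and_si128() pair above. */
            uint32_t flags = ((uint32_t)entry << 1) & CKSUM_MASK;

            assert(flags == (IP_GOOD | L4_GOOD));
            return 0;
    }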
index edc9b22..bac36e0 100644
@@ -2902,6 +2902,7 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
        for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
                xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                                rte_ixgbe_stats_strings[i].offset);
+               xstats[count].id = count;
                count++;
        }
 
@@ -2911,6 +2912,7 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                        xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                                        rte_ixgbe_rxq_strings[stat].offset +
                                        (sizeof(uint64_t) * i));
+                       xstats[count].id = count;
                        count++;
                }
        }
@@ -2921,6 +2923,7 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                        xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                                        rte_ixgbe_txq_strings[stat].offset +
                                        (sizeof(uint64_t) * i));
+                       xstats[count].id = count;
                        count++;
                }
        }
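
Filling in xstats[count].id lets callers pair each value with the name at the same index from rte_eth_xstats_get_names(). A hedged usage sketch against the 16.11 API; dump_xstats() is a name invented here and error handling is minimal:

    #include <stdio.h>
    #include <stdlib.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    /* Hypothetical helper (not part of DPDK): print every xstat of a
     * port, pairing values with names through the id field. */
    static void dump_xstats(uint8_t port_id)
    {
            int n = rte_eth_xstats_get_names(port_id, NULL, 0);
            struct rte_eth_xstat_name *names;
            struct rte_eth_xstat *vals;

            if (n <= 0)
                    return;
            names = calloc(n, sizeof(*names));
            vals = calloc(n, sizeof(*vals));
            if (names != NULL && vals != NULL &&
                rte_eth_xstats_get_names(port_id, names, n) == n &&
                rte_eth_xstats_get(port_id, vals, n) == n) {
                    for (int i = 0; i < n; i++)
                            printf("%s = %" PRIu64 "\n",
                                   names[vals[i].id].name, vals[i].value);
            }
            free(names);
            free(vals);
    }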
@@ -3168,7 +3171,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
        dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
        dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
-       dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
+       dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
        dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
        dev_info->max_vfs = dev->pci_dev->max_vfs;
@@ -3471,7 +3474,6 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        int64_t timeout;
        struct rte_eth_link link;
-       int intr_enable_delay = false;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -3504,20 +3506,19 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
                        timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
 
                ixgbe_dev_link_status_print(dev);
-
-               intr_enable_delay = true;
-       }
-
-       if (intr_enable_delay) {
+               intr->mask_original = intr->mask;
+               /* only disable lsc interrupt */
+               intr->mask &= ~IXGBE_EIMS_LSC;
                if (rte_eal_alarm_set(timeout * 1000,
                                      ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
                        PMD_DRV_LOG(ERR, "Error setting alarm");
-       } else {
-               PMD_DRV_LOG(DEBUG, "enable intr immediately");
-               ixgbe_enable_intr(dev);
-               rte_intr_enable(&(dev->pci_dev->intr_handle));
+               else
+                       intr->mask = intr->mask_original;
        }
 
+       PMD_DRV_LOG(DEBUG, "enable intr immediately");
+       ixgbe_enable_intr(dev);
+       rte_intr_enable(&dev->pci_dev->intr_handle);
 
        return 0;
 }
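
The rework keeps every interrupt except LSC enabled while an alarm-driven link recheck is pending; mask_original preserves the full mask for the delayed handler to put back. The snippet below is only the illustrative shape of that counterpart, not the literal body of ixgbe_dev_interrupt_delayed_handler():

    /* Sketch: restore the saved mask, refresh the link state, then
     * re-enable interrupts; details of the real handler may differ. */
    static void delayed_handler_sketch(void *param)
    {
            struct rte_eth_dev *dev = param;
            struct ixgbe_interrupt *intr =
                    IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

            intr->mask = intr->mask_original; /* undo ~IXGBE_EIMS_LSC */
            ixgbe_dev_link_update(dev, 0);
            ixgbe_dev_link_status_print(dev);
            ixgbe_enable_intr(dev);
    }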
index 4ff6338..a4e2996 100644
@@ -165,6 +165,8 @@ struct ixgbe_hw_fdir_info {
 struct ixgbe_interrupt {
        uint32_t flags;
        uint32_t mask;
+       /* To save the original mask during the delayed handler. */
+       uint32_t mask_original;
 };
 
 struct ixgbe_stat_mapping_registers {
index b2d9f45..c61ce47 100644
@@ -1402,17 +1402,19 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
        for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
             i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
                /* Read desc statuses backwards to avoid race condition */
-               for (j = LOOK_AHEAD-1; j >= 0; --j)
+               for (j = 0; j < LOOK_AHEAD; j++)
                        s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
 
-               for (j = LOOK_AHEAD - 1; j >= 0; --j)
-                       pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
-                                                      lo_dword.data);
+               rte_smp_rmb();
 
                /* Compute how many status bits were set */
-               nb_dd = 0;
-               for (j = 0; j < LOOK_AHEAD; ++j)
-                       nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;
+               for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
+                               (s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++)
+                       ;
+
+               for (j = 0; j < nb_dd; j++)
+                       pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
+                                                      lo_dword.data);
 
                nb_rx += nb_dd;
 
index f96cc85..e2715cb 100644
@@ -196,7 +196,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
        struct ixgbe_rx_entry *sw_ring;
        uint16_t nb_pkts_recd;
        int pos;
-       uint64_t var;
        uint8x16_t shuf_msk = {
                0xFF, 0xFF,
                0xFF, 0xFF,  /* skip 32 bits pkt_type */
@@ -255,26 +254,24 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                uint64x2_t mbp1, mbp2;
                uint8x16_t staterr;
                uint16x8_t tmp;
+               uint32_t var = 0;
                uint32_t stat;
 
                /* B.1 load 1 mbuf point */
                mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
 
-               /* Read desc statuses backwards to avoid race condition */
-               /* A.1 load 4 pkts desc */
-               descs[3] =  vld1q_u64((uint64_t *)(rxdp + 3));
-               rte_rmb();
-
                /* B.2 copy 2 mbuf point into rx_pkts  */
                vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);
 
                /* B.1 load 1 mbuf point */
                mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);
 
-               descs[2] =  vld1q_u64((uint64_t *)(rxdp + 2));
-               /* B.1 load 2 mbuf point */
-               descs[1] =  vld1q_u64((uint64_t *)(rxdp + 1));
+               /* A. load 4 pkts descs */
                descs[0] =  vld1q_u64((uint64_t *)(rxdp));
+               descs[1] =  vld1q_u64((uint64_t *)(rxdp + 1));
+               descs[2] =  vld1q_u64((uint64_t *)(rxdp + 2));
+               descs[3] =  vld1q_u64((uint64_t *)(rxdp + 3));
+               rte_smp_rmb();
 
                /* B.2 copy 2 mbuf point into rx_pkts  */
                vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);
@@ -349,11 +346,19 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                vst1q_u8((uint8_t *)&rx_pkts[pos]->rx_descriptor_fields1,
                         pkt_mb1);
 
+               stat &= IXGBE_VPMD_DESC_DD_MASK;
+
                /* C.4 calc available number of desc */
-               var =  __builtin_popcount(stat & IXGBE_VPMD_DESC_DD_MASK);
-               nb_pkts_recd += var;
-               if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
+               if (likely(stat != IXGBE_VPMD_DESC_DD_MASK)) {
+                       while (stat & 0x01) {
+                               ++var;
+                               stat = stat >> 8;
+                       }
+                       nb_pkts_recd += var;
                        break;
+               } else {
+                       nb_pkts_recd += RTE_IXGBE_DESCS_PER_LOOP;
+               }
        }
 
        /* Update our internal tail pointer */
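
A popcount over the collapsed status word would also count a DD bit that follows a not-yet-done descriptor, handing stale buffers up; the byte walk stops at the first gap instead. A scalar equivalent, assuming the same one-DD-flag-per-byte layout as IXGBE_VPMD_DESC_DD_MASK:

    #include <stdint.h>

    /* stat packs one DD flag per byte, lowest byte = oldest desc. */
    static unsigned int contiguous_dd(uint32_t stat)
    {
            unsigned int var = 0;

            while (stat & 0x01) { /* stop at the first unfinished desc */
                    ++var;
                    stat >>= 8;
            }
            return var;
    }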
index da61a85..6d43a97 100644
@@ -2961,19 +2961,25 @@ rxq_cq_to_pkt_type(uint32_t flags)
        if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET)
                pkt_type =
                        TRANSPOSE(flags,
-                                 IBV_EXP_CQ_RX_OUTER_IPV4_PACKET, RTE_PTYPE_L3_IPV4) |
+                                 IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
+                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN) |
                        TRANSPOSE(flags,
-                                 IBV_EXP_CQ_RX_OUTER_IPV6_PACKET, RTE_PTYPE_L3_IPV6) |
+                                 IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
+                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) |
                        TRANSPOSE(flags,
-                                 IBV_EXP_CQ_RX_IPV4_PACKET, RTE_PTYPE_INNER_L3_IPV4) |
+                                 IBV_EXP_CQ_RX_IPV4_PACKET,
+                                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN) |
                        TRANSPOSE(flags,
-                                 IBV_EXP_CQ_RX_IPV6_PACKET, RTE_PTYPE_INNER_L3_IPV6);
+                                 IBV_EXP_CQ_RX_IPV6_PACKET,
+                                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN);
        else
                pkt_type =
                        TRANSPOSE(flags,
-                                 IBV_EXP_CQ_RX_IPV4_PACKET, RTE_PTYPE_L3_IPV4) |
+                                 IBV_EXP_CQ_RX_IPV4_PACKET,
+                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN) |
                        TRANSPOSE(flags,
-                                 IBV_EXP_CQ_RX_IPV6_PACKET, RTE_PTYPE_L3_IPV6);
+                                 IBV_EXP_CQ_RX_IPV6_PACKET,
+                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN);
        return pkt_type;
 }
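
TRANSPOSE() relocates the bits selected by one mask to the position of another, which is how verbs completion flags are turned into RTE_PTYPE_* bits here. The definition below matches the helper these PMDs carry, though the exact form is worth verifying against the tree:

    #include <assert.h>

    /* Move the bits masked by `from` to the position of `to`; valid
     * when both masks are single bits or identically shaped groups. */
    #define TRANSPOSE(val, from, to) \
            (((from) >= (to)) ? \
             (((val) & (from)) / ((from) / (to))) : \
             (((val) & (from)) * ((to) / (from))))

    int main(void)
    {
            assert(TRANSPOSE(0x4u, 0x4u, 0x20u) == 0x20u); /* bit 2 -> 5 */
            assert(TRANSPOSE(0x0u, 0x4u, 0x20u) == 0x0u);
            return 0;
    }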
 
index 90cc35e..cb45fd0 100644
@@ -330,8 +330,10 @@ mlx5_args(struct priv *priv, struct rte_devargs *devargs)
                if (rte_kvargs_count(kvlist, params[i])) {
                        ret = rte_kvargs_process(kvlist, params[i],
                                                 mlx5_args_check, priv);
-                       if (ret != 0)
+                       if (ret != 0) {
+                               rte_kvargs_free(kvlist);
                                return ret;
+                       }
                }
        }
        rte_kvargs_free(kvlist);
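
The fix plugs a leak: the early-return path skipped rte_kvargs_free(). The full lifecycle as a minimal sketch; on_arg() is a placeholder handler and "txq_inline" just one of the driver's keys:

    #include <rte_kvargs.h>

    /* Placeholder handler; rte_kvargs_process() calls it per match. */
    static int on_arg(const char *key, const char *val, void *opaque)
    {
            (void)key; (void)val; (void)opaque;
            return 0;
    }

    static int parse_args(const char *args)
    {
            struct rte_kvargs *kvlist = rte_kvargs_parse(args, NULL);
            int ret;

            if (kvlist == NULL)
                    return -1;
            ret = rte_kvargs_process(kvlist, "txq_inline", on_arg, NULL);
            rte_kvargs_free(kvlist); /* freed on every path, as in the fix */
            return ret;
    }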
index c0f73e9..06cfd01 100644
 #include <net/if.h>
 #include <sys/ioctl.h>
 #include <sys/socket.h>
+#include <sys/utsname.h>
 #include <netinet/in.h>
 #include <linux/ethtool.h>
 #include <linux/sockios.h>
+#include <linux/version.h>
 #include <fcntl.h>
 
 /* DPDK headers don't like -pedantic. */
 #include "mlx5_rxtx.h"
 #include "mlx5_utils.h"
 
+/* Add defines in case the running kernel differs from the build headers. */
+#ifndef ETHTOOL_GLINKSETTINGS
+struct ethtool_link_settings {
+       uint32_t cmd;
+       uint32_t speed;
+       uint8_t duplex;
+       uint8_t port;
+       uint8_t phy_address;
+       uint8_t autoneg;
+       uint8_t mdio_support;
+       uint8_t eth_tp_mdix;
+       uint8_t eth_tp_mdix_ctrl;
+       int8_t link_mode_masks_nwords;
+       uint32_t reserved[8];
+       uint32_t link_mode_masks[];
+};
+
+#define ETHTOOL_GLINKSETTINGS 0x0000004c
+#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
+#define ETHTOOL_LINK_MODE_Autoneg_BIT 6
+#define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
+#define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
+#define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
+#define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
+#define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
+#define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
+#define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
+#define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
+#define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
+#define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
+#define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
+#define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
+#define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
+#define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
+#endif
+#ifndef HAVE_ETHTOOL_LINK_MODE_25G
+#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
+#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
+#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
+#endif
+#ifndef HAVE_ETHTOOL_LINK_MODE_50G
+#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
+#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
+#endif
+#ifndef HAVE_ETHTOOL_LINK_MODE_100G
+#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
+#define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
+#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
+#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
+#endif
+
 /**
  * Return private structure associated with an Ethernet device.
  *
@@ -690,8 +743,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
 }
 
 /**
- * Retrieve physical link information (unlocked version using new ioctl from
- * Linux 4.5).
+ * Retrieve physical link information (unlocked version using new ioctl).
  *
  * @param dev
  *   Pointer to Ethernet device structure.
@@ -701,7 +753,6 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
 static int
 mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
 {
-#ifdef ETHTOOL_GLINKSETTINGS
        struct priv *priv = mlx5_get_priv(dev);
        struct ethtool_link_settings edata = {
                .cmd = ETHTOOL_GLINKSETTINGS,
@@ -728,7 +779,6 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
        sc = edata.link_mode_masks[0] |
                ((uint64_t)edata.link_mode_masks[1] << 32);
        priv->link_speed_capa = 0;
-       /* Link speeds available in kernel v4.5. */
        if (sc & ETHTOOL_LINK_MODE_Autoneg_BIT)
                priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
        if (sc & (ETHTOOL_LINK_MODE_1000baseT_Full_BIT |
@@ -751,25 +801,18 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
                  ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT |
                  ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT))
                priv->link_speed_capa |= ETH_LINK_SPEED_56G;
-       /* Link speeds available in kernel v4.6. */
-#ifdef HAVE_ETHTOOL_LINK_MODE_25G
        if (sc & (ETHTOOL_LINK_MODE_25000baseCR_Full_BIT |
                  ETHTOOL_LINK_MODE_25000baseKR_Full_BIT |
                  ETHTOOL_LINK_MODE_25000baseSR_Full_BIT))
                priv->link_speed_capa |= ETH_LINK_SPEED_25G;
-#endif
-#ifdef HAVE_ETHTOOL_LINK_MODE_50G
        if (sc & (ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT |
                  ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT))
                priv->link_speed_capa |= ETH_LINK_SPEED_50G;
-#endif
-#ifdef HAVE_ETHTOOL_LINK_MODE_100G
        if (sc & (ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT |
                  ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT |
                  ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT |
                  ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT))
                priv->link_speed_capa |= ETH_LINK_SPEED_100G;
-#endif
        dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
                                ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
        dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
@@ -779,10 +822,6 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
                dev->data->dev_link = dev_link;
                return 0;
        }
-#else
-       (void)dev;
-       (void)wait_to_complete;
-#endif
        /* Link status is still the same. */
        return -1;
 }
@@ -798,12 +837,15 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
 int
 mlx5_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
 {
-       int ret;
-
-       ret = mlx5_link_update_unlocked_gs(dev, wait_to_complete);
-       if (ret < 0)
-               ret = mlx5_link_update_unlocked_gset(dev, wait_to_complete);
-       return ret;
+       struct utsname utsname;
+       int ver[3];
+
+       if (uname(&utsname) == -1 ||
+           sscanf(utsname.release, "%d.%d.%d",
+                  &ver[0], &ver[1], &ver[2]) != 3 ||
+           KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
+               return mlx5_link_update_unlocked_gset(dev, wait_to_complete);
+       return mlx5_link_update_unlocked_gs(dev, wait_to_complete);
 }
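
The compile-time #ifdefs are replaced by a runtime decision because the kernel the binary runs on may be older than the headers it was built against; ETHTOOL_GLINKSETTINGS is trusted only from 4.9 on. A standalone sketch of the same version probe:

    #include <stdio.h>
    #include <sys/utsname.h>
    #include <linux/version.h>

    /* Return 1 when the running kernel is at least major.minor.patch. */
    static int kernel_at_least(int major, int minor, int patch)
    {
            struct utsname u;
            int v[3];

            if (uname(&u) == -1 ||
                sscanf(u.release, "%d.%d.%d", &v[0], &v[1], &v[2]) != 3)
                    return 0; /* be conservative on parse failure */
            return KERNEL_VERSION(v[0], v[1], v[2]) >=
                   KERNEL_VERSION(major, minor, patch);
    }

    int main(void)
    {
            printf("GLINKSETTINGS usable: %d\n", kernel_at_least(4, 9, 0));
            return 0;
    }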
 
 /**
@@ -1141,7 +1183,7 @@ static int
 priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev)
 {
        struct ibv_async_event event;
-       int port_change = 0;
+       struct rte_eth_link *link = &dev->data->dev_link;
        int ret = 0;
 
        /* Read all message and acknowledge them. */
@@ -1149,29 +1191,24 @@ priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev)
                if (ibv_get_async_event(priv->ctx, &event))
                        break;
 
-               if (event.event_type == IBV_EVENT_PORT_ACTIVE ||
-                   event.event_type == IBV_EVENT_PORT_ERR)
-                       port_change = 1;
-               else
+               if (event.event_type != IBV_EVENT_PORT_ACTIVE &&
+                   event.event_type != IBV_EVENT_PORT_ERR)
                        DEBUG("event type %d on port %d not handled",
                              event.event_type, event.element.port_num);
                ibv_ack_async_event(&event);
        }
-
-       if (port_change ^ priv->pending_alarm) {
-               struct rte_eth_link *link = &dev->data->dev_link;
-
-               priv->pending_alarm = 0;
-               mlx5_link_update_unlocked(dev, 0);
-               if (((link->link_speed == 0) && link->link_status) ||
-                   ((link->link_speed != 0) && !link->link_status)) {
+       mlx5_link_update(dev, 0);
+       if (((link->link_speed == 0) && link->link_status) ||
+           ((link->link_speed != 0) && !link->link_status)) {
+               if (!priv->pending_alarm) {
                        /* Inconsistent status, check again later. */
                        priv->pending_alarm = 1;
                        rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US,
                                          mlx5_dev_link_status_handler,
                                          dev);
-               } else
-                       ret = 1;
+               }
+       } else {
+               ret = 1;
        }
        return ret;
 }
@@ -1191,6 +1228,7 @@ mlx5_dev_link_status_handler(void *arg)
 
        priv_lock(priv);
        assert(priv->pending_alarm == 1);
+       priv->pending_alarm = 0;
        ret = priv_dev_link_status_handler(priv, dev);
        priv_unlock(priv);
        if (ret)
index 7f31a2f..ed088ee 100644
@@ -61,9 +61,6 @@
 /* Invalidate a CQE. */
 #define MLX5_CQE_INVALIDATE (MLX5_CQE_INVALID << 4)
 
-/* CQE value to inform that VLAN is stripped. */
-#define MLX5_CQE_VLAN_STRIPPED 0x1
-
 /* Maximum number of packets a multi-packet WQE can handle. */
 #define MLX5_MPW_DSEG_MAX 5
 
 #define MLX5_OPCODE_TSO MLX5_OPCODE_LSO_MPW /* Compat with OFED 3.3. */
 #endif
 
-/* IPv4 packet. */
-#define MLX5_CQE_RX_IPV4_PACKET (1u << 2)
+/* CQE value to inform that VLAN is stripped. */
+#define MLX5_CQE_VLAN_STRIPPED (1u << 0)
+
+/* IPv4 options. */
+#define MLX5_CQE_RX_IP_EXT_OPTS_PACKET (1u << 1)
 
 /* IPv6 packet. */
-#define MLX5_CQE_RX_IPV6_PACKET (1u << 3)
+#define MLX5_CQE_RX_IPV6_PACKET (1u << 2)
+
+/* IPv4 packet. */
+#define MLX5_CQE_RX_IPV4_PACKET (1u << 3)
 
-/* Outer IPv4 packet. */
-#define MLX5_CQE_RX_OUTER_IPV4_PACKET (1u << 7)
+/* TCP packet. */
+#define MLX5_CQE_RX_TCP_PACKET (1u << 4)
 
-/* Outer IPv6 packet. */
-#define MLX5_CQE_RX_OUTER_IPV6_PACKET (1u << 8)
+/* UDP packet. */
+#define MLX5_CQE_RX_UDP_PACKET (1u << 5)
 
-/* Tunnel packet bit in the CQE. */
-#define MLX5_CQE_RX_TUNNEL_PACKET (1u << 4)
+/* IP is fragmented. */
+#define MLX5_CQE_RX_IP_FRAG_PACKET (1u << 7)
+
+/* L2 header is valid. */
+#define MLX5_CQE_RX_L2_HDR_VALID (1u << 8)
 
-/* Outer IP checksum OK. */
-#define MLX5_CQE_RX_OUTER_IP_CSUM_OK (1u << 5)
+/* L3 header is valid. */
+#define MLX5_CQE_RX_L3_HDR_VALID (1u << 9)
 
-/* Outer UDP header and checksum OK. */
-#define MLX5_CQE_RX_OUTER_TCP_UDP_CSUM_OK (1u << 6)
+/* L4 header is valid. */
+#define MLX5_CQE_RX_L4_HDR_VALID (1u << 10)
+
+/* Outer packet, 0 IPv4, 1 IPv6. */
+#define MLX5_CQE_RX_OUTER_PACKET (1u << 1)
+
+/* Tunnel packet bit in the CQE. */
+#define MLX5_CQE_RX_TUNNEL_PACKET (1u << 0)
 
 /* Subset of struct mlx5_wqe_eth_seg. */
 struct mlx5_wqe_eth_seg_small {
@@ -163,8 +175,7 @@ struct mlx5_cqe {
        uint32_t rx_hash_res;
        uint8_t rx_hash_type;
        uint8_t rsvd1[11];
-       uint8_t hds_ip_ext;
-       uint8_t l4_hdr_type_etc;
+       uint16_t hdr_type_etc;
        uint16_t vlan_info;
        uint8_t rsvd2[12];
        uint32_t byte_cnt;
index 9b59801..3997b27 100644
 #include "mlx5_defs.h"
 #include "mlx5_prm.h"
 
+static inline int
+check_cqe(volatile struct mlx5_cqe *cqe,
+         unsigned int cqes_n, const uint16_t ci)
+         __attribute__((always_inline));
+
+static inline uint32_t
+txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
+       __attribute__((always_inline));
+
+static inline void
+mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
+       __attribute__((always_inline));
+
+static inline uint32_t
+rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
+       __attribute__((always_inline));
+
+static inline int
+mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
+                uint16_t cqe_cnt, uint32_t *rss_hash)
+                __attribute__((always_inline));
+
+static inline uint32_t
+rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
+                  __attribute__((always_inline));
+
 #ifndef NDEBUG
 
 /**
@@ -98,11 +124,6 @@ check_cqe_seen(volatile struct mlx5_cqe *cqe)
 
 #endif /* NDEBUG */
 
-static inline int
-check_cqe(volatile struct mlx5_cqe *cqe,
-         unsigned int cqes_n, const uint16_t ci)
-         __attribute__((always_inline));
-
 /**
  * Check whether CQE is valid.
  *
@@ -199,7 +220,7 @@ txq_complete(struct txq *txq)
        } while (1);
        if (unlikely(cqe == NULL))
                return;
-       wqe = &(*txq->wqes)[htons(cqe->wqe_counter) &
+       wqe = &(*txq->wqes)[ntohs(cqe->wqe_counter) &
                            ((1 << txq->wqe_n) - 1)].hdr;
        elts_tail = wqe->ctrl[3];
        assert(elts_tail < (1 << txq->wqe_n));
@@ -246,10 +267,6 @@ txq_mb2mp(struct rte_mbuf *buf)
        return buf->pool;
 }
 
-static inline uint32_t
-txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
-       __attribute__((always_inline));
-
 /**
  * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
  * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
@@ -292,23 +309,20 @@ txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
  *
  * @param txq
  *   Pointer to TX queue structure.
+ * @param wqe
+ *   Pointer to the last WQE posted in the NIC.
  */
 static inline void
-mlx5_tx_dbrec(struct txq *txq)
+mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
 {
-       uint8_t *dst = (uint8_t *)((uintptr_t)txq->bf_reg + txq->bf_offset);
-       uint32_t data[4] = {
-               htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
-               htonl(txq->qp_num_8s),
-               0,
-               0,
-       };
+       uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
+       volatile uint64_t *src = ((volatile uint64_t *)wqe);
+
        rte_wmb();
        *txq->qp_db = htonl(txq->wqe_ci);
        /* Ensure ordering between DB record and BF copy. */
        rte_wmb();
-       memcpy(dst, (uint8_t *)data, 16);
-       txq->bf_offset ^= (1 << txq->bf_buf_size);
+       *dst = *src;
 }
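
The BlueFlame write shrinks to one 64-bit copy of the WQE's first eight bytes, bracketed by two write barriers: the first publishes the WQE before the doorbell record, the second orders the record before the copy. A sketch of the sequence, with rte_wmb() modeled by a compiler barrier (the real one is a store fence):

    #include <stdint.h>
    #include <arpa/inet.h>

    #define wmb() __asm__ volatile("" ::: "memory") /* stand-in */

    static void ring_doorbell(volatile uint32_t *qp_db,
                              volatile uint64_t *bf_reg,
                              const volatile uint64_t *wqe,
                              uint16_t wqe_ci)
    {
            wmb();                  /* WQE must be fully written first */
            *qp_db = htonl(wqe_ci); /* update the doorbell record */
            wmb();                  /* record before the BlueFlame copy */
            *bf_reg = *wqe;         /* first 8 bytes of the WQE */
    }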
 
 /**
@@ -594,7 +608,7 @@ next_pkt:
        txq->stats.opackets += i;
 #endif
        /* Ring QP doorbell. */
-       mlx5_tx_dbrec(txq);
+       mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)wqe);
        txq->elts_head = elts_head;
        return i;
 }
@@ -802,7 +816,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
        /* Ring QP doorbell. */
        if (mpw.state == MLX5_MPW_STATE_OPENED)
                mlx5_mpw_close(txq, &mpw);
-       mlx5_tx_dbrec(txq);
+       mlx5_tx_dbrec(txq, mpw.wqe);
        txq->elts_head = elts_head;
        return i;
 }
@@ -1028,6 +1042,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                                mpw.data.raw =
                                        (volatile void *)&(*txq->wqes)[0];
                        ++mpw.pkts_n;
+                       mpw.total_len += length;
                        ++j;
                        if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) {
                                mlx5_mpw_inline_close(txq, &mpw);
@@ -1037,7 +1052,6 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                                inline_room -= length;
                        }
                }
-               mpw.total_len += length;
                elts_head = elts_head_next;
 #ifdef MLX5_PMD_SOFT_COUNTERS
                /* Increment sent bytes counter. */
@@ -1071,7 +1085,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
                mlx5_mpw_inline_close(txq, &mpw);
        else if (mpw.state == MLX5_MPW_STATE_OPENED)
                mlx5_mpw_close(txq, &mpw);
-       mlx5_tx_dbrec(txq);
+       mlx5_tx_dbrec(txq, mpw.wqe);
        txq->elts_head = elts_head;
        return i;
 }
@@ -1091,30 +1105,28 @@ static inline uint32_t
 rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
 {
        uint32_t pkt_type;
-       uint8_t flags = cqe->l4_hdr_type_etc;
+       uint16_t flags = ntohs(cqe->hdr_type_etc);
 
-       if (cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET)
+       if (cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) {
                pkt_type =
-                       TRANSPOSE(flags,
-                                 MLX5_CQE_RX_OUTER_IPV4_PACKET,
-                                 RTE_PTYPE_L3_IPV4) |
-                       TRANSPOSE(flags,
-                                 MLX5_CQE_RX_OUTER_IPV6_PACKET,
-                                 RTE_PTYPE_L3_IPV6) |
                        TRANSPOSE(flags,
                                  MLX5_CQE_RX_IPV4_PACKET,
-                                 RTE_PTYPE_INNER_L3_IPV4) |
+                                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN) |
                        TRANSPOSE(flags,
                                  MLX5_CQE_RX_IPV6_PACKET,
-                                 RTE_PTYPE_INNER_L3_IPV6);
-       else
+                                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN);
+               pkt_type |= ((cqe->pkt_info & MLX5_CQE_RX_OUTER_PACKET) ?
+                            RTE_PTYPE_L3_IPV6_EXT_UNKNOWN :
+                            RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
+       } else {
                pkt_type =
                        TRANSPOSE(flags,
                                  MLX5_CQE_L3_HDR_TYPE_IPV6,
-                                 RTE_PTYPE_L3_IPV6) |
+                                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) |
                        TRANSPOSE(flags,
                                  MLX5_CQE_L3_HDR_TYPE_IPV4,
-                                 RTE_PTYPE_L3_IPV4);
+                                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
+       }
        return pkt_type;
 }
 
@@ -1232,28 +1244,22 @@ static inline uint32_t
 rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
 {
        uint32_t ol_flags = 0;
-       uint8_t l3_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L3_HDR_TYPE_MASK;
-       uint8_t l4_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L4_HDR_TYPE_MASK;
-
-       if ((l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV4) ||
-           (l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV6))
-               ol_flags |= TRANSPOSE(cqe->hds_ip_ext,
-                                     MLX5_CQE_L3_OK,
-                                     PKT_RX_IP_CKSUM_GOOD);
-       if ((l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP) ||
-           (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_EMP_ACK) ||
-           (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_ACK) ||
-           (l4_hdr == MLX5_CQE_L4_HDR_TYPE_UDP))
-               ol_flags |= TRANSPOSE(cqe->hds_ip_ext,
-                                     MLX5_CQE_L4_OK,
-                                     PKT_RX_L4_CKSUM_GOOD);
+       uint16_t flags = ntohs(cqe->hdr_type_etc);
+
+       ol_flags =
+               TRANSPOSE(flags,
+                         MLX5_CQE_RX_L3_HDR_VALID,
+                         PKT_RX_IP_CKSUM_GOOD) |
+               TRANSPOSE(flags,
+                         MLX5_CQE_RX_L4_HDR_VALID,
+                         PKT_RX_L4_CKSUM_GOOD);
        if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
                ol_flags |=
-                       TRANSPOSE(cqe->l4_hdr_type_etc,
-                                 MLX5_CQE_RX_OUTER_IP_CSUM_OK,
+                       TRANSPOSE(flags,
+                                 MLX5_CQE_RX_L3_HDR_VALID,
                                  PKT_RX_IP_CKSUM_GOOD) |
-                       TRANSPOSE(cqe->l4_hdr_type_etc,
-                                 MLX5_CQE_RX_OUTER_TCP_UDP_CSUM_OK,
+                       TRANSPOSE(flags,
+                                 MLX5_CQE_RX_L4_HDR_VALID,
                                  PKT_RX_L4_CKSUM_GOOD);
        return ol_flags;
 }
@@ -1310,10 +1316,10 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        }
                        while (pkt != seg) {
                                assert(pkt != (*rxq->elts)[idx]);
-                               seg = NEXT(pkt);
+                               rep = NEXT(pkt);
                                rte_mbuf_refcnt_set(pkt, 0);
                                __rte_mbuf_raw_free(pkt);
-                               pkt = seg;
+                               pkt = rep;
                        }
                        break;
                }
@@ -1338,7 +1344,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        /* Update packet information. */
                        pkt->packet_type = 0;
                        pkt->ol_flags = 0;
-                       if (rxq->rss_hash) {
+                       if (rss_hash_res && rxq->rss_hash) {
                                pkt->hash.rss = rss_hash_res;
                                pkt->ol_flags = PKT_RX_RSS_HASH;
                        }
@@ -1350,7 +1356,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                        pkt->ol_flags |=
                                                rxq_cq_to_ol_flags(rxq, cqe);
                                }
-                               if (cqe->l4_hdr_type_etc &
+                               if (cqe->hdr_type_etc &
                                    MLX5_CQE_VLAN_STRIPPED) {
                                        pkt->ol_flags |= PKT_RX_VLAN_PKT |
                                                PKT_RX_VLAN_STRIPPED;
index 5708c2a..909d80e 100644
@@ -251,8 +251,6 @@ struct txq {
        uint16_t elts_n:4; /* (*elts)[] length (in log2). */
        uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
        uint16_t wqe_n:4; /* Number of of WQ elements (in log2). */
-       uint16_t bf_buf_size:4; /* Log2 Blueflame size. */
-       uint16_t bf_offset; /* Blueflame offset. */
        uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
        uint32_t qp_num_8s; /* QP number shifted by 8. */
        volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
index 053665d..439908f 100644
@@ -220,8 +220,6 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
        tmpl->txq.wqe_n = log2above(qp->sq.wqe_cnt);
        tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
        tmpl->txq.bf_reg = qp->gen_data.bf->reg;
-       tmpl->txq.bf_offset = qp->gen_data.bf->offset;
-       tmpl->txq.bf_buf_size = log2above(qp->gen_data.bf->buf_size);
        tmpl->txq.cq_db = cq->dbrec;
        tmpl->txq.cqes =
                (volatile struct mlx5_cqe (*)[])
index c6b1587..099d82b 100644
@@ -1027,8 +1027,8 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
                dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
-                                            DEV_RX_OFFLOAD_UDP_CKSUM |
-                                            DEV_RX_OFFLOAD_TCP_CKSUM;
+                                            DEV_TX_OFFLOAD_UDP_CKSUM |
+                                            DEV_TX_OFFLOAD_TCP_CKSUM;
 
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
@@ -2043,9 +2043,9 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        new_ctrl = 0;
 
        if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
-           (mask & ETH_VLAN_FILTER_OFFLOAD))
-               RTE_LOG(INFO, PMD, "Not support for ETH_VLAN_FILTER_OFFLOAD or"
-                       " ETH_VLAN_FILTER_EXTEND");
+           (mask & ETH_VLAN_EXTEND_OFFLOAD))
+               RTE_LOG(INFO, PMD, "No support for ETH_VLAN_FILTER_OFFLOAD or"
+                       " ETH_VLAN_EXTEND_OFFLOAD");
 
        /* Enable vlan strip if it is not configured yet */
        if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
index 0162f44..57b0b31 100644
@@ -247,7 +247,7 @@ calculate_timestamp(struct timeval *ts) {
 
        cycles = rte_get_timer_cycles() - start_cycles;
        cur_time.tv_sec = cycles / hz;
-       cur_time.tv_usec = (cycles % hz) * 10e6 / hz;
+       cur_time.tv_usec = (cycles % hz) * 1e6 / hz;
        timeradd(&start_time, &cur_time, ts);
 }
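
10e6 is 1e7, so the old conversion produced microsecond values ten times too large (and well past 999999). A worked check of the corrected formula, assuming a 2 GHz timer:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long hz = 2000000000ULL;     /* 2 GHz timer */
            unsigned long long cycles = 3000000000ULL; /* 1.5 s worth */

            printf("sec  = %llu\n", cycles / hz);               /* 1 */
            printf("usec = %.0f\n", (cycles % hz) * 1e6 / hz);  /* 500000 */
            printf("bug  = %.0f\n", (cycles % hz) * 10e6 / hz); /* 5000000 */
            return 0;
    }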
 
index e83eeb8..de08650 100644
@@ -89,7 +89,7 @@ voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET \
 #define QM_STOP_CMD_STRUCT_SIZE                        2
 #define QM_STOP_CMD_PAUSE_MASK_OFFSET  0
 #define QM_STOP_CMD_PAUSE_MASK_SHIFT   0
-#define QM_STOP_CMD_PAUSE_MASK_MASK            -1
+#define QM_STOP_CMD_PAUSE_MASK_MASK            0xffffffff /* @DPDK */
 #define QM_STOP_CMD_GROUP_ID_OFFSET            1
 #define QM_STOP_CMD_GROUP_ID_SHIFT             16
 #define QM_STOP_CMD_GROUP_ID_MASK              15
index ab88671..3c369aa 100644
@@ -30,7 +30,7 @@
        24
 
 #define  CDU_REG_CID_ADDR_PARAMS_NCIB                  ( \
-               0xff << 24)
+               0xffUL << 24) /* @DPDK */
 
 #define  XSDM_REG_OPERATION_GEN \
        0xf80408UL
 #define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL
 #define XMAC_REG_CTRL_TX_EN (0x1 << 0)
 #define XMAC_REG_CTRL_RX_EN (0x1 << 1)
-#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE (0xff << 24)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE (0xffUL << 24) /* @DPDK */
 #define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE (0xff << 16)
 #define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT 16
 #define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE (0xff << 16)
-#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE (0xff << 24)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE (0xffUL << 24) /* @DPDK */
 #define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK (0xfff << 0)
 #define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT 0
 #define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK (0xfff << 0)
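
0xff is a signed int, so 0xff << 24 shifts a one into the sign bit; widened to a 64-bit register the value typically sign-extends and corrupts the mask, and the shift itself is undefined behaviour in C. The UL suffix keeps the arithmetic unsigned throughout:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t bad  = 0xff << 24;   /* int shifted into the sign
                                           * bit, then sign-extended */
            uint64_t good = 0xffUL << 24; /* unsigned all the way */

            printf("bad  = 0x%016llx\n", (unsigned long long)bad);
            /* typically 0xffffffffff000000 */
            printf("good = 0x%016llx\n", (unsigned long long)good);
            /* always    0x00000000ff000000 */
            return 0;
    }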
index 1ae6127..30fded0 100644
@@ -310,86 +310,11 @@ qed_get_vport_stats(struct ecore_dev *edev, struct ecore_eth_stats *stats)
        ecore_get_vport_stats(edev, stats);
 }
 
-static int
-qed_configure_filter_ucast(struct ecore_dev *edev,
-                          struct qed_filter_ucast_params *params)
-{
-       struct ecore_filter_ucast ucast;
-
-       if (!params->vlan_valid && !params->mac_valid) {
-               DP_NOTICE(edev, true,
-                         "Tried configuring a unicast filter,"
-                         "but both MAC and VLAN are not set\n");
-               return -EINVAL;
-       }
-
-       memset(&ucast, 0, sizeof(ucast));
-       switch (params->type) {
-       case QED_FILTER_XCAST_TYPE_ADD:
-               ucast.opcode = ECORE_FILTER_ADD;
-               break;
-       case QED_FILTER_XCAST_TYPE_DEL:
-               ucast.opcode = ECORE_FILTER_REMOVE;
-               break;
-       case QED_FILTER_XCAST_TYPE_REPLACE:
-               ucast.opcode = ECORE_FILTER_REPLACE;
-               break;
-       default:
-               DP_NOTICE(edev, true, "Unknown unicast filter type %d\n",
-                         params->type);
-       }
-
-       if (params->vlan_valid && params->mac_valid) {
-               ucast.type = ECORE_FILTER_MAC_VLAN;
-               ether_addr_copy((struct ether_addr *)&params->mac,
-                               (struct ether_addr *)&ucast.mac);
-               ucast.vlan = params->vlan;
-       } else if (params->mac_valid) {
-               ucast.type = ECORE_FILTER_MAC;
-               ether_addr_copy((struct ether_addr *)&params->mac,
-                               (struct ether_addr *)&ucast.mac);
-       } else {
-               ucast.type = ECORE_FILTER_VLAN;
-               ucast.vlan = params->vlan;
-       }
-
-       ucast.is_rx_filter = true;
-       ucast.is_tx_filter = true;
-
-       return ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
-}
-
-static int
-qed_configure_filter_mcast(struct ecore_dev *edev,
-                          struct qed_filter_mcast_params *params)
-{
-       struct ecore_filter_mcast mcast;
-       int i;
-
-       memset(&mcast, 0, sizeof(mcast));
-       switch (params->type) {
-       case QED_FILTER_XCAST_TYPE_ADD:
-               mcast.opcode = ECORE_FILTER_ADD;
-               break;
-       case QED_FILTER_XCAST_TYPE_DEL:
-               mcast.opcode = ECORE_FILTER_REMOVE;
-               break;
-       default:
-               DP_NOTICE(edev, true, "Unknown multicast filter type %d\n",
-                         params->type);
-       }
-
-       mcast.num_mc_addrs = params->num;
-       for (i = 0; i < mcast.num_mc_addrs; i++)
-               ether_addr_copy((struct ether_addr *)&params->mac[i],
-                               (struct ether_addr *)&mcast.mac[i]);
-
-       return ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
-}
-
-int qed_configure_filter_rx_mode(struct ecore_dev *edev,
+int qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
                                 enum qed_filter_rx_mode_type type)
 {
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_filter_accept_flags flags;
 
        memset(&flags, 0, sizeof(flags));
@@ -422,25 +347,6 @@ int qed_configure_filter_rx_mode(struct ecore_dev *edev,
                                       ECORE_SPQ_MODE_CB, NULL);
 }
 
-static int
-qed_configure_filter(struct ecore_dev *edev, struct qed_filter_params *params)
-{
-       switch (params->type) {
-       case QED_FILTER_TYPE_UCAST:
-               return qed_configure_filter_ucast(edev, &params->filter.ucast);
-       case QED_FILTER_TYPE_MCAST:
-               return qed_configure_filter_mcast(edev, &params->filter.mcast);
-       case QED_FILTER_TYPE_RX_MODE:
-               return qed_configure_filter_rx_mode(edev,
-                                                   params->filter.
-                                                   accept_flags);
-       default:
-               DP_NOTICE(edev, true, "Unknown filter type %d\n",
-                         (int)params->type);
-               return -EINVAL;
-       }
-}
-
 static const struct qed_eth_ops qed_eth_ops_pass = {
        INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
        INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
@@ -455,7 +361,6 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
        INIT_STRUCT_FIELD(fastpath_stop, &qed_fastpath_stop),
        INIT_STRUCT_FIELD(fastpath_start, &qed_fastpath_start),
        INIT_STRUCT_FIELD(get_vport_stats, &qed_get_vport_stats),
-       INIT_STRUCT_FIELD(filter_config, &qed_configure_filter),
 };
 
 const struct qed_eth_ops *qed_get_eth_ops(void)
index 33655c3..ef4a4b5 100644
@@ -26,12 +26,6 @@ enum qed_filter_rx_mode_type {
        QED_FILTER_RX_MODE_TYPE_PROMISC,
 };
 
-enum qed_filter_xcast_params_type {
-       QED_FILTER_XCAST_TYPE_ADD,
-       QED_FILTER_XCAST_TYPE_DEL,
-       QED_FILTER_XCAST_TYPE_REPLACE,
-};
-
 enum qed_filter_type {
        QED_FILTER_TYPE_UCAST,
        QED_FILTER_TYPE_MCAST,
@@ -93,31 +87,6 @@ struct qed_stop_txq_params {
        uint8_t tx_queue_id;
 };
 
-struct qed_filter_ucast_params {
-       enum qed_filter_xcast_params_type type;
-       uint8_t vlan_valid;
-       uint16_t vlan;
-       uint8_t mac_valid;
-       unsigned char mac[ETHER_ADDR_LEN];
-};
-
-struct qed_filter_mcast_params {
-       enum qed_filter_xcast_params_type type;
-       uint8_t num;
-       unsigned char mac[QEDE_MAX_MCAST_FILTERS][ETHER_ADDR_LEN];
-};
-
-union qed_filter_type_params {
-       enum qed_filter_rx_mode_type accept_flags;
-       struct qed_filter_ucast_params ucast;
-       struct qed_filter_mcast_params mcast;
-};
-
-struct qed_filter_params {
-       enum qed_filter_type type;
-       union qed_filter_type_params filter;
-};
-
 struct qed_eth_ops {
        const struct qed_common_ops *common;
 
@@ -162,18 +131,15 @@ struct qed_eth_ops {
 
        void (*get_vport_stats)(struct ecore_dev *edev,
                                struct ecore_eth_stats *stats);
-
-       int (*filter_config)(struct ecore_dev *edev,
-                            struct qed_filter_params *params);
 };
 
 /* externs */
 
 extern const struct qed_common_ops qed_common_ops_pass;
 
-const struct qed_eth_ops *qed_get_eth_ops();
+const struct qed_eth_ops *qed_get_eth_ops(void);
 
-int qed_configure_filter_rx_mode(struct ecore_dev *edev,
+int qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
                                 enum qed_filter_rx_mode_type type);
 
 #endif /* _QEDE_ETH_IF_H */
index d106dd0..6d6fb9d 100644
@@ -223,47 +223,181 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
        DP_INFO(edev, "*********************************\n");
 }
 
+static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
+{
+       memset(ucast, 0, sizeof(struct ecore_filter_ucast));
+       ucast->is_rx_filter = true;
+       ucast->is_tx_filter = true;
+       /* ucast->assert_on_error = true; - For debug */
+}
+
 static int
-qede_set_ucast_rx_mac(struct qede_dev *qdev,
-                     enum qed_filter_xcast_params_type opcode,
-                     uint8_t mac[ETHER_ADDR_LEN])
+qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
+                 bool add)
 {
-       struct ecore_dev *edev = &qdev->edev;
-       struct qed_filter_params filter_cmd;
-
-       memset(&filter_cmd, 0, sizeof(filter_cmd));
-       filter_cmd.type = QED_FILTER_TYPE_UCAST;
-       filter_cmd.filter.ucast.type = opcode;
-       filter_cmd.filter.ucast.mac_valid = 1;
-       rte_memcpy(&filter_cmd.filter.ucast.mac[0], &mac[0], ETHER_ADDR_LEN);
-       return qdev->ops->filter_config(edev, &filter_cmd);
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct qede_ucast_entry *tmp = NULL;
+       struct qede_ucast_entry *u;
+       struct ether_addr *mac_addr;
+
+       mac_addr  = (struct ether_addr *)ucast->mac;
+       if (add) {
+               SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
+                       if ((memcmp(mac_addr, &tmp->mac,
+                                   ETHER_ADDR_LEN) == 0) &&
+                            ucast->vlan == tmp->vlan) {
+                               DP_ERR(edev, "Unicast MAC is already added"
+                                      " with vlan = %u, vni = %u\n",
+                                      ucast->vlan,  ucast->vni);
+                               return -EEXIST;
+                       }
+               }
+               u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
+                              RTE_CACHE_LINE_SIZE);
+               if (!u) {
+                       DP_ERR(edev, "Did not allocate memory for ucast\n");
+                       return -ENOMEM;
+               }
+               ether_addr_copy(mac_addr, &u->mac);
+               u->vlan = ucast->vlan;
+               SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
+               qdev->num_uc_addr++;
+       } else {
+               SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
+                       if ((memcmp(mac_addr, &tmp->mac,
+                                   ETHER_ADDR_LEN) == 0) &&
+                           ucast->vlan == tmp->vlan)
+                               break;
+               }
+               if (tmp == NULL) {
+                       DP_INFO(edev, "Unicast MAC is not found\n");
+                       return -EINVAL;
+               }
+               SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
+               qdev->num_uc_addr--;
+       }
+
+       return 0;
 }
 
-static void
-qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
-                 uint32_t index, __rte_unused uint32_t pool)
+static int
+qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
+                 bool add)
 {
-       struct qede_dev *qdev = eth_dev->data->dev_private;
-       struct ecore_dev *edev = &qdev->edev;
-       int rc;
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct ether_addr *mac_addr;
+       struct qede_mcast_entry *tmp = NULL;
+       struct qede_mcast_entry *m;
+
+       mac_addr  = (struct ether_addr *)mcast->mac;
+       if (add) {
+               SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
+                       if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
+                               DP_ERR(edev,
+                                       "Multicast MAC is already added\n");
+                               return -EEXIST;
+                       }
+               }
+               m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
+                       RTE_CACHE_LINE_SIZE);
+               if (!m) {
+                       DP_ERR(edev,
+                               "Did not allocate memory for mcast\n");
+                       return -ENOMEM;
+               }
+               ether_addr_copy(mac_addr, &m->mac);
+               SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
+               qdev->num_mc_addr++;
+       } else {
+               SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
+                       if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
+                               break;
+               }
+               if (tmp == NULL) {
+                       DP_INFO(edev, "Multicast mac is not found\n");
+                       return -EINVAL;
+               }
+               SLIST_REMOVE(&qdev->mc_list_head, tmp,
+                            qede_mcast_entry, list);
+               qdev->num_mc_addr--;
+       }
 
-       PMD_INIT_FUNC_TRACE(edev);
+       return 0;
+}
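
The helpers above mirror the hardware filters in sys/queue.h singly-linked lists so duplicates are rejected up front and the multicast table can be replayed in full on every change. A self-contained sketch of that SLIST pattern:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/queue.h>

    struct entry {
            uint8_t mac[6];
            SLIST_ENTRY(entry) list;
    };

    SLIST_HEAD(head, entry);

    int main(void)
    {
            struct head h = SLIST_HEAD_INITIALIZER(h);
            struct entry *e = calloc(1, sizeof(*e));
            struct entry *tmp;

            if (e == NULL)
                    return 1;
            memcpy(e->mac, (uint8_t[6]){ 0x01, 0, 0, 0, 0, 0x01 }, 6);
            SLIST_INSERT_HEAD(&h, e, list);

            /* Duplicate check before adding, as in qede_ucast_filter(). */
            SLIST_FOREACH(tmp, &h, list)
                    if (memcmp(tmp->mac, e->mac, 6) == 0)
                            printf("already present\n");

            SLIST_REMOVE(&h, e, entry, list);
            free(e);
            return 0;
    }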
 
-       if (index >= qdev->dev_info.num_mac_addrs) {
-               DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
-                      index, qdev->dev_info.num_mac_addrs);
-               return;
+static enum _ecore_status_t
+qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
+                bool add)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       enum _ecore_status_t rc;
+       struct ecore_filter_mcast mcast;
+       struct qede_mcast_entry *tmp;
+       uint16_t j = 0;
+
+       /* Multicast */
+       if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
+               if (add) {
+                       if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
+                               DP_ERR(edev,
+                                      "Mcast filter table limit exceeded, "
+                                      "Please enable mcast promisc mode\n");
+                               return -ECORE_INVAL;
+                       }
+               }
+               rc = qede_mcast_filter(eth_dev, ucast, add);
+               if (rc == 0) {
+                       DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
+                       memset(&mcast, 0, sizeof(mcast));
+                       mcast.num_mc_addrs = qdev->num_mc_addr;
+                       mcast.opcode = ECORE_FILTER_ADD;
+                       SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
+                               ether_addr_copy(&tmp->mac,
+                                       (struct ether_addr *)&mcast.mac[j]);
+                               j++;
+                       }
+                       rc = ecore_filter_mcast_cmd(edev, &mcast,
+                                                   ECORE_SPQ_MODE_CB, NULL);
+               }
+               if (rc != ECORE_SUCCESS) {
+                       DP_ERR(edev, "Failed to add multicast filter"
+                              " rc = %d, op = %d\n", rc, add);
+               }
+       } else { /* Unicast */
+               if (add) {
+                       if (qdev->num_uc_addr >= qdev->dev_info.num_mac_addrs) {
+                               DP_ERR(edev,
+                                      "Ucast filter table limit exceeded,"
+                                      " Please enable promisc mode\n");
+                               return -ECORE_INVAL;
+                       }
+               }
+               rc = qede_ucast_filter(eth_dev, ucast, add);
+               if (rc == 0)
+                       rc = ecore_filter_ucast_cmd(edev, ucast,
+                                                   ECORE_SPQ_MODE_CB, NULL);
+               if (rc != ECORE_SUCCESS) {
+                       DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
+                              rc, add);
+               }
        }
 
-       /* Adding macaddr even though promiscuous mode is set */
-       if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
-               DP_INFO(edev, "Port is in promisc mode, yet adding it\n");
+       return rc;
+}
+
+static void
+qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
+                 uint32_t index, __rte_unused uint32_t pool)
+{
+       struct ecore_filter_ucast ucast;
 
-       /* Add MAC filters according to the unicast secondary macs */
-       rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
-                                  mac_addr->addr_bytes);
-       if (rc)
-               DP_ERR(edev, "Unable to add macaddr rc=%d\n", rc);
+       qede_set_ucast_cmn_params(&ucast);
+       ucast.type = ECORE_FILTER_MAC;
+       ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
+       (void)qede_mac_int_ops(eth_dev, &ucast, 1);
 }
 
 static void
@@ -272,6 +406,7 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct ether_addr mac_addr;
+       struct ecore_filter_ucast ucast;
        int rc;
 
        PMD_INIT_FUNC_TRACE(edev);
@@ -282,12 +417,15 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
                return;
        }
 
+       qede_set_ucast_cmn_params(&ucast);
+       ucast.opcode = ECORE_FILTER_REMOVE;
+       ucast.type = ECORE_FILTER_MAC;
+
        /* Use the index maintained by rte */
-       ether_addr_copy(&eth_dev->data->mac_addrs[index], &mac_addr);
-       rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
-                                  mac_addr.addr_bytes);
-       if (rc)
-               DP_ERR(edev, "Unable to remove macaddr rc=%d\n", rc);
+       ether_addr_copy(&eth_dev->data->mac_addrs[index],
+                       (struct ether_addr *)&ucast.mac);
+
+       ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
 }
 
 static void
@@ -295,6 +433,7 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct ecore_filter_ucast ucast;
        int rc;
 
        if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
@@ -306,10 +445,13 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
        }
 
        /* First remove the primary mac */
-       rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
-                                  qdev->primary_mac.addr_bytes);
-
-       if (rc) {
+       qede_set_ucast_cmn_params(&ucast);
+       ucast.opcode = ECORE_FILTER_REMOVE;
+       ucast.type = ECORE_FILTER_MAC;
+       ether_addr_copy(&qdev->primary_mac,
+                       (struct ether_addr *)&ucast.mac);
+       rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
+       if (rc != 0) {
                DP_ERR(edev, "Unable to remove current macaddr"
                             " Reverting to previous default mac\n");
                ether_addr_copy(&qdev->primary_mac,
@@ -318,18 +460,15 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
        }
 
        /* Add new MAC */
-       rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
-                                  mac_addr->addr_bytes);
-
-       if (rc)
+       ucast.opcode = ECORE_FILTER_ADD;
+       ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
+       rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
+       if (rc != 0)
                DP_ERR(edev, "Unable to add new default mac\n");
        else
                ether_addr_copy(mac_addr, &qdev->primary_mac);
 }
 
-
-
-
 static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
 {
        struct ecore_dev *edev = &qdev->edev;
@@ -415,22 +554,6 @@ static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
                mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
 }
 
-static int qede_set_ucast_rx_vlan(struct qede_dev *qdev,
-                                 enum qed_filter_xcast_params_type opcode,
-                                 uint16_t vid)
-{
-       struct qed_filter_params filter_cmd;
-       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-
-       memset(&filter_cmd, 0, sizeof(filter_cmd));
-       filter_cmd.type = QED_FILTER_TYPE_UCAST;
-       filter_cmd.filter.ucast.type = opcode;
-       filter_cmd.filter.ucast.vlan_valid = 1;
-       filter_cmd.filter.ucast.vlan = vid;
-
-       return qdev->ops->filter_config(edev, &filter_cmd);
-}
-
 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
                                uint16_t vlan_id, int on)
 {
@@ -439,6 +562,7 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
        struct qed_dev_eth_info *dev_info = &qdev->dev_info;
        struct qede_vlan_entry *tmp = NULL;
        struct qede_vlan_entry *vlan;
+       struct ecore_filter_ucast ucast;
        int rc;
 
        if (on) {
@@ -465,9 +589,13 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
                        return -ENOMEM;
                }
 
-               rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD,
-                                           vlan_id);
-               if (rc) {
+               qede_set_ucast_cmn_params(&ucast);
+               ucast.opcode = ECORE_FILTER_ADD;
+               ucast.type = ECORE_FILTER_VLAN;
+               ucast.vlan = vlan_id;
+               rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
+                                           NULL);
+               if (rc != 0) {
                        DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
                               rc);
                        rte_free(vlan);
@@ -497,9 +625,13 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
 
                SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);
 
-               rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL,
-                                           vlan_id);
-               if (rc) {
+               qede_set_ucast_cmn_params(&ucast);
+               ucast.opcode = ECORE_FILTER_REMOVE;
+               ucast.type = ECORE_FILTER_VLAN;
+               ucast.vlan = vlan_id;
+               rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
+                                           NULL);
+               if (rc != 0) {
                        DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
                               vlan_id, rc);
                } else {
@@ -742,22 +874,6 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
        return ((curr->link_status == link.link_up) ? -1 : 0);
 }
 
-static void
-qede_rx_mode_setting(struct rte_eth_dev *eth_dev,
-                    enum qed_filter_rx_mode_type accept_flags)
-{
-       struct qede_dev *qdev = eth_dev->data->dev_private;
-       struct ecore_dev *edev = &qdev->edev;
-       struct qed_filter_params rx_mode;
-
-       DP_INFO(edev, "%s mode %u\n", __func__, accept_flags);
-
-       memset(&rx_mode, 0, sizeof(struct qed_filter_params));
-       rx_mode.type = QED_FILTER_TYPE_RX_MODE;
-       rx_mode.filter.accept_flags = accept_flags;
-       qdev->ops->filter_config(edev, &rx_mode);
-}
-
 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
 {
        struct qede_dev *qdev = eth_dev->data->dev_private;
@@ -770,7 +886,7 @@ static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
        if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
                type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
 
-       qede_rx_mode_setting(eth_dev, type);
+       qed_configure_filter_rx_mode(eth_dev, type);
 }
 
 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
@@ -781,10 +897,11 @@ static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
        PMD_INIT_FUNC_TRACE(edev);
 
        if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
-               qede_rx_mode_setting(eth_dev,
-                                    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
+               qed_configure_filter_rx_mode(eth_dev,
+                               QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
        else
-               qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
+               qed_configure_filter_rx_mode(eth_dev,
+                               QED_FILTER_RX_MODE_TYPE_REGULAR);
 }
 
 static void qede_poll_sp_sb_cb(void *param)
@@ -853,6 +970,7 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
        struct ecore_dev *edev = &qdev->edev;
        struct ecore_eth_stats stats;
        unsigned int i = 0, j = 0, qid;
+       unsigned int rxq_stat_cntrs, txq_stat_cntrs;
        struct qede_tx_queue *txq;
 
        qdev->ops->get_vport_stats(edev, &stats);
@@ -886,6 +1004,17 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
        eth_stats->oerrors = stats.tx_err_drop_pkts;
 
        /* Queue stats */
+       rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+                              RTE_ETHDEV_QUEUE_STAT_CNTRS);
+       txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+                              RTE_ETHDEV_QUEUE_STAT_CNTRS);
+       if ((rxq_stat_cntrs != QEDE_RSS_COUNT(qdev)) ||
+           (txq_stat_cntrs != QEDE_TSS_COUNT(qdev)))
+               DP_VERBOSE(edev, ECORE_MSG_DEBUG,
+                      "Not all the queue stats will be displayed. Set"
+                      " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
+                      " appropriately and retry.\n");
+
        for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
                if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
                        eth_stats->q_ipackets[i] =
@@ -904,7 +1033,11 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
                                        rx_alloc_errors));
                        i++;
                }
+               if (i == rxq_stat_cntrs)
+                       break;
+       }
 
+       for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
                if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
                        txq = qdev->fp_array[(qid)].txqs[0];
                        eth_stats->q_opackets[j] =
@@ -914,13 +1047,17 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
                                                  xmit_pkts)));
                        j++;
                }
+               if (j == txq_stat_cntrs)
+                       break;
        }
 }
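The RTE_MIN() clamping above exists because rte_eth_stats carries fixed-size per-queue arrays of RTE_ETHDEV_QUEUE_STAT_CNTRS entries; a port configured with more queues would otherwise write past those arrays. A small stand-alone sketch of the clamp-warn-break pattern, with stand-in names:

    #include <stdio.h>

    #define QUEUE_STAT_CNTRS 16  /* stand-in for RTE_ETHDEV_QUEUE_STAT_CNTRS */
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static void fill_queue_stats(unsigned int nb_queues,
                                 unsigned long long ipackets[QUEUE_STAT_CNTRS])
    {
            unsigned int n = MIN(nb_queues, (unsigned int)QUEUE_STAT_CNTRS);
            unsigned int q;

            if (n != nb_queues)
                    fprintf(stderr, "only %u of %u queues reported\n",
                            n, nb_queues);
            for (q = 0; q < n; q++)         /* never index past the array */
                    ipackets[q] = 0;        /* hw counter copy would go here */
    }

    int main(void)
    {
            unsigned long long ipackets[QUEUE_STAT_CNTRS];

            fill_queue_stats(32, ipackets); /* 32 queues, only 16 reported */
            return 0;
    }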
 
 static unsigned
 qede_get_xstats_count(struct qede_dev *qdev) {
        return RTE_DIM(qede_xstats_strings) +
-               (RTE_DIM(qede_rxq_xstats_strings) * QEDE_RSS_COUNT(qdev));
+               (RTE_DIM(qede_rxq_xstats_strings) *
+                RTE_MIN(QEDE_RSS_COUNT(qdev),
+                        RTE_ETHDEV_QUEUE_STAT_CNTRS));
 }
 
 static int
@@ -930,6 +1067,7 @@ qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
        struct qede_dev *qdev = dev->data->dev_private;
        const unsigned int stat_cnt = qede_get_xstats_count(qdev);
        unsigned int i, qid, stat_idx = 0;
+       unsigned int rxq_stat_cntrs;
 
        if (xstats_names != NULL) {
                for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
@@ -940,7 +1078,9 @@ qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
                        stat_idx++;
                }
 
-               for (qid = 0; qid < QEDE_RSS_COUNT(qdev); qid++) {
+               rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+                                        RTE_ETHDEV_QUEUE_STAT_CNTRS);
+               for (qid = 0; qid < rxq_stat_cntrs; qid++) {
                        for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
                                snprintf(xstats_names[stat_idx].name,
                                        sizeof(xstats_names[stat_idx].name),
@@ -964,6 +1104,7 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
        struct ecore_eth_stats stats;
        const unsigned int num = qede_get_xstats_count(qdev);
        unsigned int i, qid, stat_idx = 0;
+       unsigned int rxq_stat_cntrs;
 
        if (n < num)
                return num;
@@ -973,15 +1114,19 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
        for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
                xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
                                             qede_xstats_strings[i].offset);
+               xstats[stat_idx].id = stat_idx;
                stat_idx++;
        }
 
-       for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+       rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
+       for (qid = 0; qid < rxq_stat_cntrs; qid++) {
                if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
                        for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
                                xstats[stat_idx].value = *(uint64_t *)(
                                        ((char *)(qdev->fp_array[(qid)].rxq)) +
                                         qede_rxq_xstats_strings[i].offset);
+                               xstats[stat_idx].id = stat_idx;
                                stat_idx++;
                        }
                }
@@ -1042,15 +1187,17 @@ static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
        if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
                type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
 
-       qede_rx_mode_setting(eth_dev, type);
+       qed_configure_filter_rx_mode(eth_dev, type);
 }
 
 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
 {
        if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
-               qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC);
+               qed_configure_filter_rx_mode(eth_dev,
+                               QED_FILTER_RX_MODE_TYPE_PROMISC);
        else
-               qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
+               qed_configure_filter_rx_mode(eth_dev,
+                               QED_FILTER_RX_MODE_TYPE_REGULAR);
 }
 
 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
@@ -1424,6 +1571,10 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
 
+       /* @DPDK */
+       edev->vendor_id = pci_dev->id.vendor_id;
+       edev->device_id = pci_dev->id.device_id;
+
        qed_ops = qed_get_eth_ops();
        if (!qed_ops) {
                DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
index a97e3d9..a35ea8b 100644 (file)
@@ -123,6 +123,17 @@ struct qede_vlan_entry {
        uint16_t vid;
 };
 
+struct qede_mcast_entry {
+       struct ether_addr mac;
+       SLIST_ENTRY(qede_mcast_entry) list;
+};
+
+struct qede_ucast_entry {
+       struct ether_addr mac;
+       uint16_t vlan;
+       SLIST_ENTRY(qede_ucast_entry) list;
+};
+
 /*
  *  Structure to store private data for each port.
  */
@@ -147,6 +158,10 @@ struct qede_dev {
        uint16_t configured_vlans;
        bool accept_any_vlan;
        struct ether_addr primary_mac;
+       SLIST_HEAD(mc_list_head, qede_mcast_entry) mc_list_head;
+       uint16_t num_mc_addr;
+       SLIST_HEAD(uc_list_head, qede_ucast_entry) uc_list_head;
+       uint16_t num_uc_addr;
        bool handle_hw_err;
        char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
 };
index ab22409..b666e1c 100644 (file)
@@ -137,6 +137,7 @@ static int qed_load_firmware_data(struct ecore_dev *edev)
 
        if (fstat(fd, &st) < 0) {
                DP_NOTICE(edev, false, "Can't stat firmware file\n");
+               close(fd);
                return -1;
        }
 
@@ -158,9 +159,11 @@ static int qed_load_firmware_data(struct ecore_dev *edev)
        if (edev->fw_len < 104) {
                DP_NOTICE(edev, false, "Invalid fw size: %" PRIu64 "\n",
                          edev->fw_len);
+               close(fd);
                return -EINVAL;
        }
 
+       close(fd);
        return 0;
 }
 #endif
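The added close() calls plug a descriptor leak on the fstat() and size-check error paths. An equivalent, less repetitive shape is the single-exit cleanup label; a generic, self-contained sketch (not the driver's code):

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    static int load_file_size(const char *path, long *size)
    {
            struct stat st;
            int fd, rc = -1;

            fd = open(path, O_RDONLY);
            if (fd < 0)
                    return -1;
            if (fstat(fd, &st) < 0)
                    goto out;       /* every error path funnels here */
            *size = (long)st.st_size;
            rc = 0;
    out:
            close(fd);              /* the single exit point owns the fd */
            return rc;
    }

    int main(void)
    {
            long size;

            return load_file_size("/etc/hostname", &size);
    }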
index 2e181c8..a34b665 100644 (file)
@@ -435,13 +435,15 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
        struct ecore_dev *edev = &qdev->edev;
        struct qede_fastpath *fp;
        uint32_t num_sbs;
-       int rc, i;
+       uint16_t i;
+       uint16_t sb_idx;
+       int rc;
 
        if (IS_VF(edev))
                ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
        else
-               num_sbs = (ecore_cxt_get_proto_cid_count
-                         (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL)) / 2;
+               num_sbs = ecore_cxt_get_proto_cid_count
+                         (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);
 
        if (num_sbs == 0) {
                DP_ERR(edev, "No status blocks available\n");
@@ -459,7 +461,11 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
 
        for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) {
                fp = &qdev->fp_array[i];
-               if (qede_alloc_mem_sb(qdev, fp->sb_info, i % num_sbs)) {
+               if (IS_VF(edev))
+                       sb_idx = i % num_sbs;
+               else
+                       sb_idx = i;
+               if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
                        qede_free_fp_arrays(qdev);
                        return -ENOMEM;
                }
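The status-block change gives the PF a 1:1 queue-to-SB mapping, while a VF, which may be granted fewer status blocks than queues, keeps wrapping with modulo. The index selection reduces to the following illustrative helper (not part of the driver):

    #include <stdio.h>

    static unsigned int sb_index(int is_vf, unsigned int queue,
                                 unsigned int num_sbs)
    {
            /* VF: wrap within a limited SB pool; PF: one SB per queue */
            return is_vf ? queue % num_sbs : queue;
    }

    int main(void)
    {
            printf("VF q5 of 4 SBs -> %u, PF q5 -> %u\n",
                   sb_index(1, 5, 4), sb_index(0, 5, 4));
            return 0;
    }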
index 766d4ef..328dde0 100644 (file)
@@ -115,9 +115,6 @@ struct pmd_internal {
        char *dev_name;
        char *iface_name;
        uint16_t max_queues;
-       uint64_t flags;
-
-       volatile uint16_t once;
 };
 
 struct internal_list {
@@ -324,6 +321,7 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                                *(uint64_t *)(((char *)vq)
                                + vhost_rxport_stat_strings[t].offset);
                }
+               xstats[count].id = count;
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
@@ -336,6 +334,7 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                                *(uint64_t *)(((char *)vq)
                                + vhost_txport_stat_strings[t].offset);
                }
+               xstats[count].id = count;
                count++;
        }
        return count;
@@ -774,35 +773,40 @@ vhost_driver_session_stop(void)
 }
 
 static int
-eth_dev_start(struct rte_eth_dev *dev)
+eth_dev_start(struct rte_eth_dev *dev __rte_unused)
 {
-       struct pmd_internal *internal = dev->data->dev_private;
-       int ret = 0;
-
-       if (rte_atomic16_cmpset(&internal->once, 0, 1)) {
-               ret = rte_vhost_driver_register(internal->iface_name,
-                                               internal->flags);
-               if (ret)
-                       return ret;
-       }
-
-       /* We need only one message handling thread */
-       if (rte_atomic16_add_return(&nb_started_ports, 1) == 1)
-               ret = vhost_driver_session_start();
+       return 0;
+}
 
-       return ret;
+static void
+eth_dev_stop(struct rte_eth_dev *dev __rte_unused)
+{
 }
 
 static void
-eth_dev_stop(struct rte_eth_dev *dev)
+eth_dev_close(struct rte_eth_dev *dev)
 {
-       struct pmd_internal *internal = dev->data->dev_private;
+       struct pmd_internal *internal;
+       struct internal_list *list;
+
+       internal = dev->data->dev_private;
+       if (!internal)
+               return;
 
-       if (rte_atomic16_cmpset(&internal->once, 1, 0))
-               rte_vhost_driver_unregister(internal->iface_name);
+       rte_vhost_driver_unregister(internal->iface_name);
 
-       if (rte_atomic16_sub_return(&nb_started_ports, 1) == 0)
-               vhost_driver_session_stop();
+       list = find_internal_resource(internal->iface_name);
+       if (!list)
+               return;
+
+       pthread_mutex_lock(&internal_list_lock);
+       TAILQ_REMOVE(&internal_list, list, next);
+       pthread_mutex_unlock(&internal_list_lock);
+       rte_free(list);
+
+       free(internal->dev_name);
+       free(internal->iface_name);
+       rte_free(internal);
 }
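With this change, vhost socket registration moves out of start/stop, which become no-ops, and into device creation and dev_close, so repeated start/stop cycles no longer re-register the socket. A compressed, hypothetical sketch of the resulting lifecycle:

    /* hypothetical lifecycle sketch; names are not the PMD's */
    struct vport {
            int registered;
    };

    static int vport_create(struct vport *p)
    {
            p->registered = 1;      /* register the socket once, here */
            return 0;
    }

    static int vport_start(struct vport *p)
    {
            (void)p;
            return 0;               /* no-op: datapath already set up */
    }

    static void vport_stop(struct vport *p)
    {
            (void)p;                /* no-op: nothing to tear down */
    }

    static void vport_close(struct vport *p)
    {
            p->registered = 0;      /* unregister and free resources */
    }

    int main(void)
    {
            struct vport p;

            vport_create(&p);
            vport_start(&p);
            vport_stop(&p);         /* start/stop cycles are now harmless */
            vport_start(&p);
            vport_close(&p);        /* teardown happens exactly once */
            return 0;
    }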
 
 static int
@@ -973,6 +977,7 @@ rte_eth_vhost_feature_get(void)
 static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
+       .dev_close = eth_dev_close,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
@@ -1046,7 +1051,6 @@ eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
        internal->iface_name = strdup(iface_name);
        if (internal->iface_name == NULL)
                goto error;
-       internal->flags = flags;
 
        list->eth_dev = eth_dev;
        pthread_mutex_lock(&internal_list_lock);
@@ -1081,6 +1085,15 @@ eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
        eth_dev->rx_pkt_burst = eth_vhost_rx;
        eth_dev->tx_pkt_burst = eth_vhost_tx;
 
+       if (rte_vhost_driver_register(iface_name, flags))
+               goto error;
+
+       /* We need only one message handling thread */
+       if (rte_atomic16_add_return(&nb_started_ports, 1) == 1) {
+               if (vhost_driver_session_start())
+                       goto error;
+       }
+
        return data->port_id;
 
 error:
@@ -1192,8 +1205,6 @@ static int
 rte_pmd_vhost_remove(const char *name)
 {
        struct rte_eth_dev *eth_dev = NULL;
-       struct pmd_internal *internal;
-       struct internal_list *list;
        unsigned int i;
 
        RTE_LOG(INFO, PMD, "Un-Initializing pmd_vhost for %s\n", name);
@@ -1203,27 +1214,16 @@ rte_pmd_vhost_remove(const char *name)
        if (eth_dev == NULL)
                return -ENODEV;
 
-       internal = eth_dev->data->dev_private;
-       if (internal == NULL)
-               return -ENODEV;
-
-       list = find_internal_resource(internal->iface_name);
-       if (list == NULL)
-               return -ENODEV;
+       eth_dev_stop(eth_dev);
 
-       pthread_mutex_lock(&internal_list_lock);
-       TAILQ_REMOVE(&internal_list, list, next);
-       pthread_mutex_unlock(&internal_list_lock);
-       rte_free(list);
+       eth_dev_close(eth_dev);
 
-       eth_dev_stop(eth_dev);
+       if (rte_atomic16_sub_return(&nb_started_ports, 1) == 0)
+               vhost_driver_session_stop();
 
        rte_free(vring_states[eth_dev->data->port_id]);
        vring_states[eth_dev->data->port_id] = NULL;
 
-       free(internal->dev_name);
-       free(internal->iface_name);
-
        for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
                rte_free(eth_dev->data->rx_queues[i]);
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
@@ -1231,7 +1231,6 @@ rte_pmd_vhost_remove(const char *name)
 
        rte_free(eth_dev->data->mac_addrs);
        rte_free(eth_dev->data);
-       rte_free(internal);
 
        rte_eth_dev_release_port(eth_dev);
 
index 079fd6c..f5961ab 100644 (file)
@@ -152,6 +152,8 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
 #define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
                            sizeof(rte_virtio_txq_stat_strings[0]))
 
+struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
+
 static int
 virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
                int *dlen, int pkt_num)
@@ -360,7 +362,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
         * Read the virtqueue size from the Queue Size field
         * Always power of 2 and if 0 virtqueue does not exist
         */
-       vq_size = hw->vtpci_ops->get_queue_num(hw, vtpci_queue_idx);
+       vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
        PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
        if (vq_size == 0) {
                PMD_INIT_LOG(ERR, "virtqueue does not exist");
@@ -519,7 +521,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
                }
        }
 
-       if (hw->vtpci_ops->setup_queue(hw, vq) < 0) {
+       if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
                PMD_INIT_LOG(ERR, "setup_queue failed");
                return -EINVAL;
        }
@@ -893,6 +895,7 @@ virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
                        xstats[count].value = *(uint64_t *)(((char *)rxvq) +
                                rte_virtio_rxq_stat_strings[t].offset);
+                       xstats[count].id = count;
                        count++;
                }
        }
@@ -908,6 +911,7 @@ virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
                        xstats[count].value = *(uint64_t *)(((char *)txvq) +
                                rte_virtio_txq_stat_strings[t].offset);
+                       xstats[count].id = count;
                        count++;
                }
        }
@@ -1114,7 +1118,7 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
                req_features);
 
        /* Read device(host) feature bits */
-       host_features = hw->vtpci_ops->get_features(hw);
+       host_features = VTPCI_OPS(hw)->get_features(hw);
        PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
                host_features);
 
@@ -1204,14 +1208,14 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
        if (virtio_negotiate_features(hw, req_features) < 0)
                return -1;
 
+       rte_eth_copy_pci_info(eth_dev, pci_dev);
+
        /* If host does not support status then disable LSC */
        if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS))
                eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
        else
                eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
 
-       rte_eth_copy_pci_info(eth_dev, pci_dev);
-
        rx_func_get(eth_dev);
 
        /* Setting up rx_header size for the device */
@@ -1286,6 +1290,52 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
        return 0;
 }
 
+/*
+ * Remap the PCI device again (IO port map for legacy device and
+ * memory map for modern device), so that the secondary process
+ * has the PCI device initialized correctly.
+ */
+static int
+virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
+{
+       if (hw->modern) {
+               /*
+                * We don't have to re-parse the PCI config space, since
+                * rte_eal_pci_map_device() makes sure the mapped address
+                * in the secondary process equals the one mapped in
+                * the primary process: an error is returned if that
+                * requirement is not met.
+                *
+                * That said, we can simply reuse all the cap pointers
+                * (such as dev_cfg, common_cfg, etc.) parsed by the
+                * primary process, which are stored in shared memory.
+                */
+               if (rte_eal_pci_map_device(pci_dev)) {
+                       PMD_INIT_LOG(DEBUG, "failed to map pci device!");
+                       return -1;
+               }
+       } else {
+               if (rte_eal_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
+                       return -1;
+       }
+
+       return 0;
+}
+
+static void
+virtio_set_vtpci_ops(struct virtio_hw *hw)
+{
+#ifdef RTE_VIRTIO_USER
+       if (hw->virtio_user_dev)
+               VTPCI_OPS(hw) = &virtio_user_ops;
+       else
+#endif
+       if (hw->modern)
+               VTPCI_OPS(hw) = &modern_ops;
+       else
+               VTPCI_OPS(hw) = &legacy_ops;
+}
+
 /*
  * This function is based on probe() function in virtio_pci.c
  * It returns 0 on success.
@@ -1304,7 +1354,19 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
        eth_dev->tx_pkt_burst = &virtio_xmit_pkts;
 
        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
-               rx_func_get(eth_dev);
+               if (!hw->virtio_user_dev) {
+                       ret = virtio_remap_pci(eth_dev->pci_dev, hw);
+                       if (ret)
+                               return ret;
+               }
+
+               virtio_set_vtpci_ops(hw);
+               if (hw->use_simple_rxtx) {
+                       eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
+                       eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+               } else {
+                       rx_func_get(eth_dev);
+               }
                return 0;
        }
 
@@ -1318,6 +1380,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        pci_dev = eth_dev->pci_dev;
+       hw->port_id = eth_dev->data->port_id;
 
        if (pci_dev) {
                ret = vtpci_init(pci_dev, hw, &dev_flags);
index 27d9a19..4feccf9 100644 (file)
         1ULL << VIRTIO_F_VERSION_1       |     \
         1ULL << VIRTIO_F_IOMMU_PLATFORM)
 
+#define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES    \
+       (VIRTIO_PMD_DEFAULT_GUEST_FEATURES |    \
+        1u << VIRTIO_NET_F_GUEST_CSUM     |    \
+        1u << VIRTIO_NET_F_GUEST_TSO4     |    \
+        1u << VIRTIO_NET_F_GUEST_TSO6)
+
 /*
  * CQ function prototype
  */
index 9b47165..8d5355c 100644 (file)
@@ -92,17 +92,17 @@ legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
        while (length > 0) {
                if (length >= 4) {
                        size = 4;
-                       rte_eal_pci_ioport_read(&hw->io, dst, size,
+                       rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
                                VIRTIO_PCI_CONFIG(hw) + offset);
                        *(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
                } else if (length >= 2) {
                        size = 2;
-                       rte_eal_pci_ioport_read(&hw->io, dst, size,
+                       rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
                                VIRTIO_PCI_CONFIG(hw) + offset);
                        *(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
                } else {
                        size = 1;
-                       rte_eal_pci_ioport_read(&hw->io, dst, size,
+                       rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
                                VIRTIO_PCI_CONFIG(hw) + offset);
                }
 
@@ -111,7 +111,7 @@ legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
                length -= size;
        }
 #else
-       rte_eal_pci_ioport_read(&hw->io, dst, length,
+       rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, length,
                                VIRTIO_PCI_CONFIG(hw) + offset);
 #endif
 }
@@ -131,16 +131,16 @@ legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
                if (length >= 4) {
                        size = 4;
                        tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
-                       rte_eal_pci_ioport_write(&hw->io, &tmp.u32, size,
+                       rte_eal_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
                                VIRTIO_PCI_CONFIG(hw) + offset);
                } else if (length >= 2) {
                        size = 2;
                        tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
-                       rte_eal_pci_ioport_write(&hw->io, &tmp.u16, size,
+                       rte_eal_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
                                VIRTIO_PCI_CONFIG(hw) + offset);
                } else {
                        size = 1;
-                       rte_eal_pci_ioport_write(&hw->io, src, size,
+                       rte_eal_pci_ioport_write(VTPCI_IO(hw), src, size,
                                VIRTIO_PCI_CONFIG(hw) + offset);
                }
 
@@ -149,7 +149,7 @@ legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
                length -= size;
        }
 #else
-       rte_eal_pci_ioport_write(&hw->io, src, length,
+       rte_eal_pci_ioport_write(VTPCI_IO(hw), src, length,
                                 VIRTIO_PCI_CONFIG(hw) + offset);
 #endif
 }
@@ -159,7 +159,8 @@ legacy_get_features(struct virtio_hw *hw)
 {
        uint32_t dst;
 
-       rte_eal_pci_ioport_read(&hw->io, &dst, 4, VIRTIO_PCI_HOST_FEATURES);
+       rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 4,
+                               VIRTIO_PCI_HOST_FEATURES);
        return dst;
 }
 
@@ -171,7 +172,7 @@ legacy_set_features(struct virtio_hw *hw, uint64_t features)
                        "only 32 bit features are allowed for legacy virtio!");
                return;
        }
-       rte_eal_pci_ioport_write(&hw->io, &features, 4,
+       rte_eal_pci_ioport_write(VTPCI_IO(hw), &features, 4,
                                 VIRTIO_PCI_GUEST_FEATURES);
 }
 
@@ -180,14 +181,14 @@ legacy_get_status(struct virtio_hw *hw)
 {
        uint8_t dst;
 
-       rte_eal_pci_ioport_read(&hw->io, &dst, 1, VIRTIO_PCI_STATUS);
+       rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
        return dst;
 }
 
 static void
 legacy_set_status(struct virtio_hw *hw, uint8_t status)
 {
-       rte_eal_pci_ioport_write(&hw->io, &status, 1, VIRTIO_PCI_STATUS);
+       rte_eal_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
 }
 
 static void
@@ -201,7 +202,7 @@ legacy_get_isr(struct virtio_hw *hw)
 {
        uint8_t dst;
 
-       rte_eal_pci_ioport_read(&hw->io, &dst, 1, VIRTIO_PCI_ISR);
+       rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
        return dst;
 }
 
@@ -211,8 +212,10 @@ legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
 {
        uint16_t dst;
 
-       rte_eal_pci_ioport_write(&hw->io, &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
-       rte_eal_pci_ioport_read(&hw->io, &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
+       rte_eal_pci_ioport_write(VTPCI_IO(hw), &vec, 2,
+                                VIRTIO_MSI_CONFIG_VECTOR);
+       rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2,
+                               VIRTIO_MSI_CONFIG_VECTOR);
        return dst;
 }
 
@@ -221,8 +224,9 @@ legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
 {
        uint16_t dst;
 
-       rte_eal_pci_ioport_write(&hw->io, &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
-       rte_eal_pci_ioport_read(&hw->io, &dst, 2, VIRTIO_PCI_QUEUE_NUM);
+       rte_eal_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2,
+                                VIRTIO_PCI_QUEUE_SEL);
+       rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
        return dst;
 }
 
@@ -234,10 +238,10 @@ legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
        if (!check_vq_phys_addr_ok(vq))
                return -1;
 
-       rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2,
+       rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
                         VIRTIO_PCI_QUEUE_SEL);
        src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
-       rte_eal_pci_ioport_write(&hw->io, &src, 4, VIRTIO_PCI_QUEUE_PFN);
+       rte_eal_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
 
        return 0;
 }
@@ -247,15 +251,15 @@ legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
 {
        uint32_t src = 0;
 
-       rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2,
+       rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
                         VIRTIO_PCI_QUEUE_SEL);
-       rte_eal_pci_ioport_write(&hw->io, &src, 4, VIRTIO_PCI_QUEUE_PFN);
+       rte_eal_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
 }
 
 static void
 legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
 {
-       rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2,
+       rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
                         VIRTIO_PCI_QUEUE_NOTIFY);
 }
 
@@ -289,7 +293,7 @@ static int
 legacy_virtio_resource_init(struct rte_pci_device *pci_dev,
                            struct virtio_hw *hw, uint32_t *dev_flags)
 {
-       if (rte_eal_pci_ioport_map(pci_dev, 0, &hw->io) < 0)
+       if (rte_eal_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
                return -1;
 
        if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UNKNOWN)
@@ -300,7 +304,7 @@ legacy_virtio_resource_init(struct rte_pci_device *pci_dev,
        return 0;
 }
 
-static const struct virtio_pci_ops legacy_ops = {
+const struct virtio_pci_ops legacy_ops = {
        .read_dev_cfg   = legacy_read_dev_config,
        .write_dev_cfg  = legacy_write_dev_config,
        .reset          = legacy_reset,
@@ -516,7 +520,7 @@ modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
        io_write16(1, vq->notify_addr);
 }
 
-static const struct virtio_pci_ops modern_ops = {
+const struct virtio_pci_ops modern_ops = {
        .read_dev_cfg   = modern_read_dev_config,
        .write_dev_cfg  = modern_write_dev_config,
        .reset          = modern_reset,
@@ -537,14 +541,14 @@ void
 vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
                      void *dst, int length)
 {
-       hw->vtpci_ops->read_dev_cfg(hw, offset, dst, length);
+       VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
 }
 
 void
 vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
                       const void *src, int length)
 {
-       hw->vtpci_ops->write_dev_cfg(hw, offset, src, length);
+       VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
 }
 
 uint64_t
@@ -557,7 +561,7 @@ vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
         * host all support.
         */
        features = host_features & hw->guest_features;
-       hw->vtpci_ops->set_features(hw, features);
+       VTPCI_OPS(hw)->set_features(hw, features);
 
        return features;
 }
@@ -565,9 +569,9 @@ vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
 void
 vtpci_reset(struct virtio_hw *hw)
 {
-       hw->vtpci_ops->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+       VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
        /* flush status write */
-       hw->vtpci_ops->get_status(hw);
+       VTPCI_OPS(hw)->get_status(hw);
 }
 
 void
@@ -580,21 +584,21 @@ void
 vtpci_set_status(struct virtio_hw *hw, uint8_t status)
 {
        if (status != VIRTIO_CONFIG_STATUS_RESET)
-               status |= hw->vtpci_ops->get_status(hw);
+               status |= VTPCI_OPS(hw)->get_status(hw);
 
-       hw->vtpci_ops->set_status(hw, status);
+       VTPCI_OPS(hw)->set_status(hw, status);
 }
 
 uint8_t
 vtpci_get_status(struct virtio_hw *hw)
 {
-       return hw->vtpci_ops->get_status(hw);
+       return VTPCI_OPS(hw)->get_status(hw);
 }
 
 uint8_t
 vtpci_isr(struct virtio_hw *hw)
 {
-       return hw->vtpci_ops->get_isr(hw);
+       return VTPCI_OPS(hw)->get_isr(hw);
 }
 
 
@@ -602,7 +606,7 @@ vtpci_isr(struct virtio_hw *hw)
 uint16_t
 vtpci_irq_config(struct virtio_hw *hw, uint16_t vec)
 {
-       return hw->vtpci_ops->set_config_irq(hw, vec);
+       return VTPCI_OPS(hw)->set_config_irq(hw, vec);
 }
 
 static void *
@@ -736,8 +740,8 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw,
         */
        if (virtio_read_caps(dev, hw) == 0) {
                PMD_INIT_LOG(INFO, "modern virtio pci detected.");
-               hw->vtpci_ops = &modern_ops;
-               hw->modern    = 1;
+               virtio_hw_internal[hw->port_id].vtpci_ops = &modern_ops;
+               hw->modern = 1;
                *dev_flags |= RTE_ETH_DEV_INTR_LSC;
                return 0;
        }
@@ -755,7 +759,7 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw,
                return -1;
        }
 
-       hw->vtpci_ops = &legacy_ops;
+       virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
        hw->use_msix = legacy_virtio_has_msix(&dev->addr);
        hw->modern   = 0;
 
index de271bf..511a1c8 100644 (file)
@@ -245,7 +245,6 @@ struct virtio_net_config;
 
 struct virtio_hw {
        struct virtnet_ctl *cvq;
-       struct rte_pci_ioport io;
        uint64_t    req_guest_features;
        uint64_t    guest_features;
        uint32_t    max_queue_pairs;
@@ -254,6 +253,7 @@ struct virtio_hw {
        uint8_t     use_msix;
        uint8_t     modern;
        uint8_t     use_simple_rxtx;
+       uint8_t     port_id;
        uint8_t     mac_addr[ETHER_ADDR_LEN];
        uint32_t    notify_off_multiplier;
        uint8_t     *isr;
@@ -261,12 +261,28 @@ struct virtio_hw {
        struct rte_pci_device *dev;
        struct virtio_pci_common_cfg *common_cfg;
        struct virtio_net_config *dev_cfg;
-       const struct virtio_pci_ops *vtpci_ops;
        void        *virtio_user_dev;
 
        struct virtqueue **vqs;
 };
 
+
+/*
+ * While virtio_hw is stored in shared memory, this structure stores
+ * per-process data that may differ in the multi-process model,
+ * for example the vtpci_ops pointer.
+ */
+struct virtio_hw_internal {
+       const struct virtio_pci_ops *vtpci_ops;
+       struct rte_pci_ioport io;
+};
+
+#define VTPCI_OPS(hw)  (virtio_hw_internal[(hw)->port_id].vtpci_ops)
+#define VTPCI_IO(hw)   (&virtio_hw_internal[(hw)->port_id].io)
+
+extern struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
+
+
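struct virtio_hw lives in shared memory, but a function pointer is only meaningful inside the process that assigned it, since each process maps the driver's code at its own addresses. Hence the per-process virtio_hw_internal table, keyed by the shared port id. A stripped-down, self-contained illustration of the idea:

    #include <stdio.h>

    struct ops {
            void (*notify)(void);
    };

    static void local_notify(void)
    {
            puts("notify");
    }

    static const struct ops my_ops = { .notify = local_notify };

    /* process-local table: every process fills in its own copy */
    static const struct ops *ops_table[8];

    static void attach(unsigned int port_id)
    {
            ops_table[port_id] = &my_ops;   /* valid in this process only */
    }

    int main(void)
    {
            attach(0);                      /* shared state holds just "0" */
            ops_table[0]->notify();
            return 0;
    }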
 /*
  * This structure is just a reference to read
  * net device specific config space; it is just a placeholder structure
@@ -317,4 +333,8 @@ uint8_t vtpci_isr(struct virtio_hw *);
 
 uint16_t vtpci_irq_config(struct virtio_hw *, uint16_t);
 
+extern const struct virtio_pci_ops legacy_ops;
+extern const struct virtio_pci_ops modern_ops;
+extern const struct virtio_pci_ops virtio_user_ops;
+
 #endif /* _VIRTIO_PCI_H_ */
index 22d97a4..a33ef1a 100644 (file)
@@ -258,6 +258,12 @@ tx_offload_enabled(struct virtio_hw *hw)
                vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
 }
 
+/* avoid unnecessary write operations, to lessen cache pressure */
+#define ASSIGN_UNLESS_EQUAL(var, val) do {     \
+       if ((var) != (val))                     \
+               (var) = (val);                  \
+} while (0)
+
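The virtio-net header written on transmit can share a cache line with other hot per-packet data, and on most packets the fields already hold the target value; skipping the redundant store keeps the line clean instead of dirtying it every time. A standalone check of the macro's behavior:

    #include <assert.h>

    #define ASSIGN_UNLESS_EQUAL(var, val) do {      \
            if ((var) != (val))                     \
                    (var) = (val);                  \
    } while (0)

    int main(void)
    {
            int flags = 0;

            ASSIGN_UNLESS_EQUAL(flags, 0);  /* equal: no store issued */
            ASSIGN_UNLESS_EQUAL(flags, 3);  /* differs: store happens */
            assert(flags == 3);
            return 0;
    }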
 static inline void
 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                       uint16_t needed, int use_indirect, int can_push)
@@ -286,8 +292,14 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                hdr = (struct virtio_net_hdr *)
                        rte_pktmbuf_prepend(cookie, head_size);
                /* if offload disabled, it is not zeroed below, do it now */
-               if (offload == 0)
-                       memset(hdr, 0, head_size);
+               if (offload == 0) {
+                       ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+                       ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+                       ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+                       ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+                       ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+                       ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+               }
        } else if (use_indirect) {
                /* setup tx ring slot to point to indirect
                 * descriptor list stored in reserved region.
@@ -337,9 +349,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                        break;
 
                default:
-                       hdr->csum_start = 0;
-                       hdr->csum_offset = 0;
-                       hdr->flags = 0;
+                       ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+                       ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+                       ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
                        break;
                }
 
@@ -355,9 +367,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                                cookie->l3_len +
                                cookie->l4_len;
                } else {
-                       hdr->gso_type = 0;
-                       hdr->gso_size = 0;
-                       hdr->hdr_len = 0;
+                       ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+                       ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+                       ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
                }
        }
 
index e239e0e..a38398b 100644 (file)
@@ -148,12 +148,13 @@ virtio_user_start_device(struct virtio_user_dev *dev)
 
        /* Step 1: set features
         * Make sure VHOST_USER_F_PROTOCOL_FEATURES is added if mq is enabled,
-        * and VIRTIO_NET_F_MAC is stripped.
+        * and VIRTIO_NET_F_MAC and VIRTIO_NET_F_CTRL_VQ are stripped.
         */
        features = dev->features;
        if (dev->max_queue_pairs > 1)
                features |= VHOST_USER_MQ;
        features &= ~(1ull << VIRTIO_NET_F_MAC);
+       features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
        ret = vhost_user_sock(dev->vhostfd, VHOST_USER_SET_FEATURES, &features);
        if (ret < 0)
                goto error;
@@ -181,7 +182,17 @@ error:
 
 int virtio_user_stop_device(struct virtio_user_dev *dev)
 {
-       return vhost_user_sock(dev->vhostfd, VHOST_USER_RESET_OWNER, NULL);
+       uint32_t i;
+
+       for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
+               close(dev->callfds[i]);
+               close(dev->kickfds[i]);
+       }
+
+       for (i = 0; i < dev->max_queue_pairs; ++i)
+               vhost_user_enable_queue_pair(dev->vhostfd, i, 0);
+
+       return 0;
 }
 
 static inline void
@@ -209,6 +220,8 @@ int
 virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
                     int cq, int queue_size, const char *mac)
 {
+       uint32_t i;
+
        snprintf(dev->path, PATH_MAX, "%s", path);
        dev->max_queue_pairs = queues;
        dev->queue_pairs = 1; /* mq disabled by default */
@@ -217,6 +230,11 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
        parse_mac(dev, mac);
        dev->vhostfd = -1;
 
+       for (i = 0; i < VIRTIO_MAX_VIRTQUEUES * 2 + 1; ++i) {
+               dev->kickfds[i] = -1;
+               dev->callfds[i] = -1;
+       }
+
        dev->vhostfd = vhost_user_setup(dev->path);
        if (dev->vhostfd < 0) {
                PMD_INIT_LOG(ERR, "backend set up fails");
@@ -228,29 +246,26 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
        }
 
        if (vhost_user_sock(dev->vhostfd, VHOST_USER_GET_FEATURES,
-                           &dev->features) < 0) {
+                           &dev->device_features) < 0) {
                PMD_INIT_LOG(ERR, "get_features failed: %s", strerror(errno));
                return -1;
        }
        if (dev->mac_specified)
-               dev->features |= (1ull << VIRTIO_NET_F_MAC);
+               dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
 
-       if (!cq) {
-               dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
-               /* Also disable features depends on VIRTIO_NET_F_CTRL_VQ */
-               dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_RX);
-               dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN);
-               dev->features &= ~(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
-               dev->features &= ~(1ull << VIRTIO_NET_F_MQ);
-               dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
-       } else {
-               /* vhost user backend does not need to know ctrl-q, so
-                * actually we need add this bit into features. However,
-                * DPDK vhost-user does send features with this bit, so we
-                * check it instead of OR it for now.
+       if (cq) {
+               /* the device does not really need to know anything about
+                * the CQ, so we can simply claim to support it
                 */
-               if (!(dev->features & (1ull << VIRTIO_NET_F_CTRL_VQ)))
-                       PMD_INIT_LOG(INFO, "vhost does not support ctrl-q");
+               dev->device_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
+       } else {
+               dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
+               /* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
+               dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_RX);
+               dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN);
+               dev->device_features &= ~(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
+               dev->device_features &= ~(1ull << VIRTIO_NET_F_MQ);
+               dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
        }
 
        if (dev->max_queue_pairs > 1) {
@@ -266,13 +281,6 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
 void
 virtio_user_dev_uninit(struct virtio_user_dev *dev)
 {
-       uint32_t i;
-
-       for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
-               close(dev->callfds[i]);
-               close(dev->kickfds[i]);
-       }
-
        close(dev->vhostfd);
 }
 
index 33690b5..28fc788 100644 (file)
@@ -46,7 +46,10 @@ struct virtio_user_dev {
        uint32_t        max_queue_pairs;
        uint32_t        queue_pairs;
        uint32_t        queue_size;
-       uint64_t        features;
+       uint64_t        features; /* the features negotiated with the
+                                  * driver, kept in sync with the device
+                                  */
+       uint64_t        device_features; /* features supported by the device */
        uint8_t         status;
        uint8_t         mac_addr[ETHER_ADDR_LEN];
        char            path[PATH_MAX];
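Splitting features (the negotiated set) from device_features (the backend's offer) lets set_features clamp any request against the offer instead of overwriting it. A self-contained sketch of that invariant:

    #include <assert.h>
    #include <stdint.h>

    struct udev {
            uint64_t device_features;       /* immutable backend offer */
            uint64_t features;              /* negotiated subset */
    };

    static void set_features(struct udev *d, uint64_t requested)
    {
            d->features = requested & d->device_features;
    }

    int main(void)
    {
            struct udev d = { .device_features = 0x5, .features = 0 };

            set_features(&d, 0x7);          /* ask for more than offered */
            assert(d.features == 0x5);      /* clamped to the offer */
            return 0;
    }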
index 406beea..013600e 100644 (file)
@@ -87,21 +87,24 @@ virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
 }
 
 static void
-virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
+virtio_user_reset(struct virtio_hw *hw)
 {
        struct virtio_user_dev *dev = virtio_user_get_dev(hw);
 
-       if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
-               virtio_user_start_device(dev);
-       dev->status = status;
+       if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
+               virtio_user_stop_device(dev);
 }
 
 static void
-virtio_user_reset(struct virtio_hw *hw)
+virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
 {
        struct virtio_user_dev *dev = virtio_user_get_dev(hw);
 
-       virtio_user_stop_device(dev);
+       if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
+               virtio_user_start_device(dev);
+       else if (status == VIRTIO_CONFIG_STATUS_RESET)
+               virtio_user_reset(hw);
+       dev->status = status;
 }
 
 static uint8_t
@@ -117,7 +120,8 @@ virtio_user_get_features(struct virtio_hw *hw)
 {
        struct virtio_user_dev *dev = virtio_user_get_dev(hw);
 
-       return dev->features;
+       /* strip feature bits the virtio PMD does not support */
+       return dev->device_features & VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
 }
 
 static void
@@ -125,7 +129,7 @@ virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
 {
        struct virtio_user_dev *dev = virtio_user_get_dev(hw);
 
-       dev->features = features;
+       dev->features = features & dev->device_features;
 }
 
 static uint8_t
@@ -212,7 +216,7 @@ virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
                            strerror(errno));
 }
 
-static const struct virtio_pci_ops virtio_user_ops = {
+const struct virtio_pci_ops virtio_user_ops = {
        .read_dev_cfg   = virtio_user_read_dev_config,
        .write_dev_cfg  = virtio_user_write_dev_config,
        .reset          = virtio_user_reset,
@@ -301,7 +305,8 @@ virtio_user_eth_dev_alloc(const char *name)
                return NULL;
        }
 
-       hw->vtpci_ops = &virtio_user_ops;
+       hw->port_id = data->port_id;
+       virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops;
        hw->use_msix = 0;
        hw->modern   = 0;
        hw->use_simple_rxtx = 0;
index f0bb089..b1070e0 100644 (file)
@@ -330,7 +330,7 @@ virtqueue_notify(struct virtqueue *vq)
         * For virtio on IA, the notification is through io port operation
         * which is a serialization instruction itself.
         */
-       vq->hw->vtpci_ops->notify_queue(vq->hw, vq);
+       VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
 }
 
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
index b109168..93db10f 100644 (file)
@@ -518,6 +518,32 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        return nb_tx;
 }
 
+static inline void
+vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
+                  struct rte_mbuf *mbuf)
+{
+       uint32_t val = 0;
+       struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
+       struct Vmxnet3_RxDesc *rxd =
+               (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
+       vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
+
+       if (ring_id == 0)
+               val = VMXNET3_RXD_BTYPE_HEAD;
+       else
+               val = VMXNET3_RXD_BTYPE_BODY;
+
+       buf_info->m = mbuf;
+       buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
+       buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
+
+       rxd->addr = buf_info->bufPA;
+       rxd->btype = val;
+       rxd->len = buf_info->len;
+       rxd->gen = ring->gen;
+
+       vmxnet3_cmd_ring_adv_next2fill(ring);
+}
+
 /*
  *  Allocates mbufs and clusters. Post rx descriptors with buffer details
  *  so that device can receive packets in those buffers.
@@ -657,9 +683,18 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        }
 
        while (rcd->gen == rxq->comp_ring.gen) {
+               struct rte_mbuf *newm;
+
                if (nb_rx >= nb_pkts)
                        break;
 
+               newm = rte_mbuf_raw_alloc(rxq->mp);
+               if (unlikely(newm == NULL)) {
+                       PMD_RX_LOG(ERR, "Error allocating mbuf");
+                       rxq->stats.rx_buf_alloc_failure++;
+                       break;
+               }
+
                idx = rcd->rxdIdx;
                ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
                rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
@@ -759,8 +794,8 @@ rcd_done:
                VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp,
                                          rxq->cmd_ring[ring_idx].size);
 
-               /* It's time to allocate some new buf and renew descriptors */
-               vmxnet3_post_rx_bufs(rxq, ring_idx);
+               /* It's time to renew descriptors */
+               vmxnet3_renew_desc(rxq, ring_idx, newm);
                if (unlikely(rxq->shared->ctrl.updateRxProd)) {
                        VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
                                               rxq->cmd_ring[ring_idx].next2fill);
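The reworked receive path allocates the replacement mbuf before the filled descriptor is consumed: on allocation failure the loop breaks and the descriptor stays armed, rather than a ring slot being stranded. The swap reduces to this pattern (stand-in types, not the PMD's):

    #include <stdlib.h>

    struct buf {
            char data[2048];
    };

    static struct buf *ring_slot;           /* one-slot stand-in for the ring */

    static struct buf *poll_one(void)
    {
            struct buf *newb = malloc(sizeof(*newb));
            struct buf *done;

            if (newb == NULL)
                    return NULL;            /* ring untouched; retry later */
            done = ring_slot;               /* take the filled buffer */
            ring_slot = newb;               /* re-arm with the fresh one */
            return done;                    /* hand the old buffer up */
    }

    int main(void)
    {
            ring_slot = malloc(sizeof(*ring_slot));  /* initial fill */
            free(poll_one());
            free(ring_slot);
            return 0;
    }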
index 38e466c..6aeaa06 100644 (file)
@@ -177,6 +177,7 @@ pcmd_drvinfo_callback(__rte_unused void *ptr_params,
        int id_port;
 
        for (id_port = 0; id_port < rte_eth_dev_count(); id_port++) {
+               memset(&info, 0, sizeof(info));
                if (rte_ethtool_get_drvinfo(id_port, &info)) {
                        printf("Error getting info for port %i\n", id_port);
                        return;
index a1f91d4..6f0ce84 100644 (file)
@@ -61,10 +61,15 @@ rte_ethtool_get_drvinfo(uint8_t port_id, struct ethtool_drvinfo *drvinfo)
                dev_info.driver_name);
        snprintf(drvinfo->version, sizeof(drvinfo->version), "%s",
                rte_version());
-       snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
-               "%04x:%02x:%02x.%x",
-               dev_info.pci_dev->addr.domain, dev_info.pci_dev->addr.bus,
-               dev_info.pci_dev->addr.devid, dev_info.pci_dev->addr.function);
+       if (dev_info.pci_dev)
+               snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
+                       "%04x:%02x:%02x.%x",
+                       dev_info.pci_dev->addr.domain,
+                       dev_info.pci_dev->addr.bus,
+                       dev_info.pci_dev->addr.devid,
+                       dev_info.pci_dev->addr.function);
+       else
+               snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info), "N/A");
 
        memset(&reg_info, 0, sizeof(reg_info));
        rte_eth_dev_get_reg_info(port_id, &reg_info);
index f8b84e0..e41290e 100644 (file)
@@ -491,6 +491,9 @@ struct app_eal_params {
 #define APP_THREAD_HEADROOM_STATS_COLLECT        1
 #endif
 
+#define APP_CORE_MASK_SIZE                                     \
+       (RTE_MAX_LCORE / 64 + ((RTE_MAX_LCORE % 64) ? 1 : 0))
+
 struct app_params {
        /* Config */
        char app_name[APP_APPNAME_SIZE];
@@ -533,7 +536,7 @@ struct app_params {
        /* Init */
        char *eal_argv[1 + APP_EAL_ARGC];
        struct cpu_core_map *core_map;
-       uint64_t core_mask;
+       uint64_t core_mask[APP_CORE_MASK_SIZE];
        struct rte_mempool *mempool[APP_MAX_MEMPOOLS];
        struct app_link_data link_data[APP_MAX_LINKS];
        struct rte_ring *swq[APP_MAX_PKTQ_SWQ];
@@ -1359,6 +1362,36 @@ app_get_link_for_kni(struct app_params *app, struct app_pktq_kni_params *p_kni)
        return &app->link_params[link_param_idx];
 }
 
+static inline uint32_t
+app_core_is_enabled(struct app_params *app, uint32_t lcore_id)
+{
+       return ((app->core_mask[lcore_id / 64] &
+               (1LLU << (lcore_id % 64))) != 0);
+}
+
+static inline void
+app_core_enable_in_core_mask(struct app_params *app, int lcore_id)
+{
+       app->core_mask[lcore_id / 64] |= 1LLU << (lcore_id % 64);
+}
+
+static inline void
+app_core_build_core_mask_string(struct app_params *app, char *mask_buffer)
+{
+       int i;
+
+       mask_buffer[0] = '\0';
+       for (i = (int)RTE_DIM(app->core_mask); i > 0; i--) {
+               /* For Hex representation of bits in uint64_t */
+               char buffer[(64 / 8) * 2 + 1];
+               memset(buffer, 0, sizeof(buffer));
+               snprintf(buffer, sizeof(buffer), "%016" PRIx64,
+                        app->core_mask[i-1]);
+               strcat(mask_buffer, buffer);
+       }
+}
+
 void app_pipeline_params_get(struct app_params *app,
        struct app_pipeline_params *p_in,
        struct pipeline_params *p_out);
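
The helpers above index the widened mask as word lcore_id / 64, bit
lcore_id % 64, so builds with RTE_MAX_LCORE above 64 no longer overflow a
single uint64_t. For example, assuming RTE_MAX_LCORE is 128,
APP_CORE_MASK_SIZE evaluates to 2 and lcore 70 lands in core_mask[1],
bit 6; the string builder emits core_mask[1] before core_mask[0], so the
hex mask reads most-significant word first, as a -c coremask expects.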
index 3b36b53..d46bd36 100644 (file)
@@ -78,11 +78,14 @@ app_init_core_map(struct app_params *app)
                cpu_core_map_print(app->core_map);
 }
 
+/* Core Mask String in Hex Representation */
+#define APP_CORE_MASK_STRING_SIZE ((64 * APP_CORE_MASK_SIZE) / 8 * 2 + 1)
+
 static void
 app_init_core_mask(struct app_params *app)
 {
-       uint64_t mask = 0;
        uint32_t i;
+       char core_mask_str[APP_CORE_MASK_STRING_SIZE];
 
        for (i = 0; i < app->n_pipelines; i++) {
                struct app_pipeline_params *p = &app->pipeline_params[i];
@@ -96,17 +99,18 @@ app_init_core_mask(struct app_params *app)
                if (lcore_id < 0)
                        rte_panic("Cannot create CPU core mask\n");
 
-               mask |= 1LLU << lcore_id;
+               app_core_enable_in_core_mask(app, lcore_id);
        }
 
-       app->core_mask = mask;
-       APP_LOG(app, HIGH, "CPU core mask = 0x%016" PRIx64, app->core_mask);
+       app_core_build_core_mask_string(app, core_mask_str);
+       APP_LOG(app, HIGH, "CPU core mask = 0x%s", core_mask_str);
 }
 
 static void
 app_init_eal(struct app_params *app)
 {
        char buffer[256];
+       char core_mask_str[APP_CORE_MASK_STRING_SIZE];
        struct app_eal_params *p = &app->eal_params;
        uint32_t n_args = 0;
        uint32_t i;
@@ -114,7 +118,8 @@ app_init_eal(struct app_params *app)
 
        app->eal_argv[n_args++] = strdup(app->app_name);
 
-       snprintf(buffer, sizeof(buffer), "-c%" PRIx64, app->core_mask);
+       app_core_build_core_mask_string(app, core_mask_str);
+       snprintf(buffer, sizeof(buffer), "-c%s", core_mask_str);
        app->eal_argv[n_args++] = strdup(buffer);
 
        if (p->coremap) {
index 8b71a7d..7ab0afe 100644 (file)
@@ -589,7 +589,7 @@ pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p,
                                params->name, arg_name);
                        dma_hash_lb_present = 1;
 
-                       if (strcmp(arg_value, "hash") ||
+                       if (strcmp(arg_value, "hash") &&
                                strcmp(arg_value, "HASH"))
 
                                PIPELINE_PARSE_ERR_INV_VAL(0,
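
The operator swap is the whole fix: strcmp() returns 0 on a match, so with
the old ||, arg_value = "hash" still left strcmp(arg_value, "HASH")
non-zero and the parse error fired even for the valid spelling. With &&,
the error fires only when both comparisons are non-zero, i.e. when the
value matches neither "hash" nor "HASH".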
index 6c547ca..4590c2b 100644 (file)
@@ -70,8 +70,7 @@ app_pipeline_enable(struct app_params *app,
                        core_id,
                        hyper_th_id);
 
-       if ((thread_id < 0) ||
-               ((app->core_mask & (1LLU << thread_id)) == 0))
+       if ((thread_id < 0) || !app_core_is_enabled(app, thread_id))
                return -1;
 
        if (app_pipeline_data(app, pipeline_id) == NULL)
@@ -134,8 +133,7 @@ app_pipeline_disable(struct app_params *app,
                        core_id,
                        hyper_th_id);
 
-       if ((thread_id < 0) ||
-               ((app->core_mask & (1LLU << thread_id)) == 0))
+       if ((thread_id < 0) || !app_core_is_enabled(app, thread_id))
                return -1;
 
        if (app_pipeline_data(app, pipeline_id) == NULL)
@@ -188,8 +186,7 @@ app_thread_headroom(struct app_params *app,
                        core_id,
                        hyper_th_id);
 
-       if ((thread_id < 0) ||
-               ((app->core_mask & (1LLU << thread_id)) == 0))
+       if ((thread_id < 0) || !app_core_is_enabled(app, thread_id))
                return -1;
 
        req = app_msg_alloc(app);
index 43fef59..bc88be5 100644 (file)
@@ -200,7 +200,7 @@ struct lcore_queue_conf {
        unsigned nb_crypto_devs;
        unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
 
-       struct op_buffer op_buf[RTE_MAX_ETHPORTS];
+       struct op_buffer op_buf[RTE_CRYPTO_MAX_DEVS];
        struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
 } __rte_cache_aligned;
 
@@ -299,7 +299,7 @@ print_stats(void)
 
        for (cdevid = 0; cdevid < RTE_CRYPTO_MAX_DEVS; cdevid++) {
                /* skip disabled ports */
-               if ((l2fwd_enabled_crypto_mask & (1lu << cdevid)) == 0)
+               if ((l2fwd_enabled_crypto_mask & (((uint64_t)1) << cdevid)) == 0)
                        continue;
                printf("\nStatistics for cryptodev %"PRIu64
                                " -------------------------"
@@ -1808,7 +1808,7 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
                        return -1;
                }
 
-               l2fwd_enabled_crypto_mask |= (1 << cdev_id);
+               l2fwd_enabled_crypto_mask |= (((uint64_t)1) << cdev_id);
 
                enabled_cdevs[cdev_id] = 1;
                enabled_cdev_count++;
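
Both mask updates are widened because the literal 1 is a 32-bit int:
shifting it left by 31 or more is undefined behaviour, so devices past
bit 30 could never be recorded reliably in the 64-bit mask. A two-line
illustration of the difference:

        uint64_t mask = 0;
        unsigned int cdev_id = 40;

        mask |= ((uint64_t)1) << cdev_id;   /* well-defined: sets bit 40 */
        /* mask |= 1 << cdev_id;               undefined once cdev_id >= 31 */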
index 0709859..eddaf92 100644 (file)
@@ -1393,7 +1393,7 @@ create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
                mtu = 64 * 1024;
 
        nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
-                       (mbuf_size - RTE_PKTMBUF_HEADROOM) * MAX_PKT_BURST;
+                       (mbuf_size - RTE_PKTMBUF_HEADROOM);
        nr_mbufs_per_core += nr_rx_desc;
        nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
 
@@ -1436,11 +1436,12 @@ main(int argc, char *argv[])
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Invalid argument\n");
 
-       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++)
+       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
                TAILQ_INIT(&lcore_info[lcore_id].vdev_list);
 
                if (rte_lcore_is_enabled(lcore_id))
-                       lcore_ids[core_id ++] = lcore_id;
+                       lcore_ids[core_id++] = lcore_id;
+       }
 
        if (rte_lcore_count() > RTE_MAX_LCORE)
                rte_exit(EXIT_FAILURE,"Not enough cores\n");
index 127e8d0..54e95d5 100644 (file)
@@ -225,13 +225,14 @@ rte_cryptodev_create_vdev(const char *name, const char *args)
 }
 
 int
-rte_cryptodev_get_dev_id(const char *name) {
+rte_cryptodev_get_dev_id(const char *name)
+{
        unsigned i;
 
        if (name == NULL)
                return -1;
 
-       for (i = 0; i < rte_cryptodev_globals->max_devs; i++)
+       for (i = 0; i < rte_cryptodev_globals->nb_devs; i++)
                if ((strcmp(rte_cryptodev_globals->devs[i].data->name, name)
                                == 0) &&
                                (rte_cryptodev_globals->devs[i].attached ==
index abfe2dc..c6a5794 100644 (file)
@@ -183,8 +183,9 @@ rte_cryptodev_pmd_get_named_dev(const char *name)
        if (name == NULL)
                return NULL;
 
-       for (i = 0, dev = &rte_cryptodev_globals->devs[i];
-                       i < rte_cryptodev_globals->max_devs; i++) {
+       for (i = 0; i < rte_cryptodev_globals->max_devs; i++) {
+               dev = &rte_cryptodev_globals->devs[i];
+
                if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
                                (strcmp(dev->data->name, name) == 0))
                        return dev;
index e403717..ffa8ad9 100644 (file)
@@ -72,6 +72,7 @@ rte_eal_parse_devargs_str(const char *devargs_str,
 
        if (*drvargs == NULL) {
                free(*drvname);
+               *drvname = NULL;
                return -1;
        }
        return 0;
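
Resetting *drvname after the free() keeps the error path from handing a
dangling pointer back through the out-parameter. A hypothetical caller
shows why this matters (free(NULL) is defined to be a no-op):

        char *name = NULL, *args = NULL;

        if (rte_eal_parse_devargs_str("net_foo,iface=x", &name, &args) < 0) {
                /* Safe even though parsing failed part-way: the callee
                 * reset the pointer it freed, so no double-free here. */
                free(name);
                free(args);
        }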
index 0ff2377..7d6e54f 100644 (file)
@@ -111,6 +111,14 @@ rte_eal_vdev_uninit(const char *name)
                        return driver->remove(name);
        }
 
+       /* Give new names precedence over aliases. */
+       TAILQ_FOREACH(driver, &vdev_driver_list, next) {
+               if (driver->driver.alias &&
+                   !strncmp(driver->driver.alias, name,
+                           strlen(driver->driver.alias)))
+                       return driver->remove(name);
+       }
+
        RTE_LOG(ERR, EAL, "no driver found for %s\n", name);
        return -EINVAL;
 }
index da204e6..0de35fb 100644 (file)
@@ -66,7 +66,7 @@ extern "C" {
 /**
  * Patch level number i.e. the z in yy.mm.z
  */
-#define RTE_VER_MINOR 0
+#define RTE_VER_MINOR 1
 
 /**
  * Extra string to be appended to version number
index 1786b75..3e4ffb5 100644 (file)
@@ -133,7 +133,7 @@ pci_mknod_uio_dev(const char *sysfs_uio_path, unsigned uio_num)
        snprintf(filename, sizeof(filename), "/dev/uio%u", uio_num);
        dev = makedev(major, minor);
        ret = mknod(filename, S_IFCHR | S_IRUSR | S_IWUSR, dev);
-       if (f == NULL) {
+       if (ret != 0) {
                RTE_LOG(ERR, EAL, "%s(): mknod() failed %s\n",
                        __func__, strerror(errno));
                return -1;
index 00cf919..fb4a2f8 100644 (file)
@@ -301,6 +301,7 @@ vfio_mp_sync_thread(void __rte_unused * arg)
                                vfio_mp_sync_send_request(conn_sock, SOCKET_ERR);
                        else
                                vfio_mp_sync_send_fd(conn_sock, fd);
+                       close(fd);
                        break;
                case SOCKET_REQ_GROUP:
                        /* wait for group number */
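
The added close() plugs a descriptor leak: passing an fd across a UNIX
socket with SCM_RIGHTS installs a duplicate in the receiving process, so
the sender must close its own copy once it has been sent. In outline
(helper names are illustrative, not the EAL API):

        int fd = open("/dev/vfio/vfio", O_RDWR);    /* container fd */

        if (fd < 0) {
                send_error(conn_sock);      /* peer sees SOCKET_ERR */
        } else {
                send_fd(conn_sock, fd);     /* kernel dups fd into the peer */
                close(fd);                  /* sender's copy now redundant */
        }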
index fde8112..5a31759 100644 (file)
@@ -189,6 +189,20 @@ rte_eth_dev_find_free_port(void)
        return RTE_MAX_ETHPORTS;
 }
 
+static struct rte_eth_dev *
+eth_dev_get(uint8_t port_id)
+{
+       struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
+
+       eth_dev->data = &rte_eth_dev_data[port_id];
+       eth_dev->attached = DEV_ATTACHED;
+
+       eth_dev_last_created_port = port_id;
+       nb_ports++;
+
+       return eth_dev;
+}
+
 struct rte_eth_dev *
 rte_eth_dev_allocate(const char *name)
 {
@@ -210,13 +224,41 @@ rte_eth_dev_allocate(const char *name)
                return NULL;
        }
 
-       eth_dev = &rte_eth_devices[port_id];
-       eth_dev->data = &rte_eth_dev_data[port_id];
+       eth_dev = eth_dev_get(port_id);
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;
-       eth_dev->attached = DEV_ATTACHED;
-       eth_dev_last_created_port = port_id;
-       nb_ports++;
+
+       return eth_dev;
+}
+
+/*
+ * Attach to a port already registered by the primary process, which
+ * makes sure that the same device would have the same port id both
+ * in the primary and secondary process.
+ */
+static struct rte_eth_dev *
+eth_dev_attach_secondary(const char *name)
+{
+       uint8_t i;
+       struct rte_eth_dev *eth_dev;
+
+       if (rte_eth_dev_data == NULL)
+               rte_eth_dev_data_alloc();
+
+       for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+               if (strcmp(rte_eth_dev_data[i].name, name) == 0)
+                       break;
+       }
+       if (i == RTE_MAX_ETHPORTS) {
+               RTE_PMD_DEBUG_TRACE(
+                       "device %s is not driven by the primary process\n",
+                       name);
+               return NULL;
+       }
+
+       eth_dev = eth_dev_get(i);
+       RTE_ASSERT(eth_dev->data->port_id == i);
+
        return eth_dev;
 }
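
Because rte_eth_dev_data lives in shared memory, the strcmp() walk above
finds the slot the primary process filled in, and eth_dev_get(i)
re-attaches at exactly that index; that shared lookup is what guarantees
the same device gets the same port id in both processes.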
 
@@ -246,16 +288,28 @@ rte_eth_dev_pci_probe(struct rte_pci_driver *pci_drv,
        rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
                        sizeof(ethdev_name));
 
-       eth_dev = rte_eth_dev_allocate(ethdev_name);
-       if (eth_dev == NULL)
-               return -ENOMEM;
-
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+               eth_dev = rte_eth_dev_allocate(ethdev_name);
+               if (eth_dev == NULL)
+                       return -ENOMEM;
+
                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                  eth_drv->dev_private_size,
                                  RTE_CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memzone for private port data\n");
+       } else {
+               eth_dev = eth_dev_attach_secondary(ethdev_name);
+               if (eth_dev == NULL) {
+                       /*
+                        * If attaching the device failed, the primary
+                        * process skipped it due to some error. Return a
+                        * positive value so that EAL skips it for the
+                        * secondary process as well.
+                        */
+                       return 1;
+               }
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;
@@ -376,6 +430,9 @@ rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
                return -EINVAL;
        }
 
+       if (!nb_ports)
+               return -ENODEV;
+
        *port_id = RTE_MAX_ETHPORTS;
 
        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
@@ -1343,8 +1400,10 @@ get_xstats_count(uint8_t port_id)
        } else
                count = 0;
        count += RTE_NB_STATS;
-       count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
-       count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
+       count += RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
+                RTE_NB_RXQ_STATS;
+       count += RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
+                RTE_NB_TXQ_STATS;
        return count;
 }
 
@@ -1358,6 +1417,7 @@ rte_eth_xstats_get_names(uint8_t port_id,
        int cnt_expected_entries;
        int cnt_driver_entries;
        uint32_t idx, id_queue;
+       uint16_t num_q;
 
        cnt_expected_entries = get_xstats_count(port_id);
        if (xstats_names == NULL || cnt_expected_entries < 0 ||
@@ -1374,7 +1434,8 @@ rte_eth_xstats_get_names(uint8_t port_id,
                        "%s", rte_stats_strings[idx].name);
                cnt_used_entries++;
        }
-       for (id_queue = 0; id_queue < dev->data->nb_rx_queues; id_queue++) {
+       num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+       for (id_queue = 0; id_queue < num_q; id_queue++) {
                for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
                        snprintf(xstats_names[cnt_used_entries].name,
                                sizeof(xstats_names[0].name),
@@ -1384,7 +1445,8 @@ rte_eth_xstats_get_names(uint8_t port_id,
                }
 
        }
-       for (id_queue = 0; id_queue < dev->data->nb_tx_queues; id_queue++) {
+       num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+       for (id_queue = 0; id_queue < num_q; id_queue++) {
                for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
                        snprintf(xstats_names[cnt_used_entries].name,
                                sizeof(xstats_names[0].name),
@@ -1420,14 +1482,18 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
        unsigned count = 0, i, q;
        signed xcount = 0;
        uint64_t val, *stats_ptr;
+       uint16_t nb_rxqs, nb_txqs;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
        dev = &rte_eth_devices[port_id];
 
+       nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+       nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
        /* Return generic statistics */
-       count = RTE_NB_STATS + (dev->data->nb_rx_queues * RTE_NB_RXQ_STATS) +
-               (dev->data->nb_tx_queues * RTE_NB_TXQ_STATS);
+       count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
+               (nb_txqs * RTE_NB_TXQ_STATS);
 
        /* implemented by the driver */
        if (dev->dev_ops->xstats_get != NULL) {
@@ -1458,7 +1524,7 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
        }
 
        /* per-rxq stats */
-       for (q = 0; q < dev->data->nb_rx_queues; q++) {
+       for (q = 0; q < nb_rxqs; q++) {
                for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
                        stats_ptr = RTE_PTR_ADD(&eth_stats,
                                        rte_rxq_stats_strings[i].offset +
@@ -1469,7 +1535,7 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
        }
 
        /* per-txq stats */
-       for (q = 0; q < dev->data->nb_tx_queues; q++) {
+       for (q = 0; q < nb_txqs; q++) {
                for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
                        stats_ptr = RTE_PTR_ADD(&eth_stats,
                                        rte_txq_stats_strings[i].offset +
@@ -1479,8 +1545,11 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
                }
        }
 
-       for (i = 0; i < count + xcount; i++)
+       for (i = 0; i < count; i++)
                xstats[i].id = i;
+       /* add an offset to driver-specific stats */
+       for ( ; i < count + xcount; i++)
+               xstats[i].id += count;
 
        return count + xcount;
 }
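
The split loop fixes the id numbering: generic statistics occupy ids
0 .. count-1, while a PMD's xstats_get() reports its entries with ids
relative to its own block. As a worked example, if count is 20 and the
driver returns xcount = 5 entries carrying ids 0..4, the second loop
rebases those to 20..24 so every id in the combined array is unique.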
index 72be66d..fd62263 100644 (file)
@@ -19,7 +19,6 @@ DPDK_2.2 {
        rte_eth_dev_bypass_ver_show;
        rte_eth_dev_bypass_wd_reset;
        rte_eth_dev_bypass_wd_timeout_show;
-       rte_eth_dev_callback_process;
        rte_eth_dev_callback_register;
        rte_eth_dev_callback_unregister;
        rte_eth_dev_close;
index 440f3b1..956ce04 100644 (file)
@@ -610,9 +610,7 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
  *   never be used. The access to the per-lcore table is of course
  *   faster than the multi-producer/consumer pool. The cache can be
  *   disabled if the cache_size argument is set to 0; it can be useful to
- *   avoid losing objects in cache. Note that even if not used, the
- *   memory space for cache is always reserved in a mempool structure,
- *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
+ *   avoid losing objects in cache.
  * @param private_data_size
  *   The size of the private data appended after the mempool
  *   structure. This is useful for storing some private data after the
index 5fd8af2..817f77e 100644 (file)
@@ -118,7 +118,7 @@ stack_dequeue(struct rte_mempool *mp, void **obj_table,
 
        s->len -= n;
        rte_spinlock_unlock(&s->sl);
-       return n;
+       return 0;
 }
 
 static unsigned
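
Returning n was the bug: the handler's dequeue callback reports status,
not a count, and rte_mempool_get_bulk() promises 0 on success, so a
positive return leaked through to callers testing for non-zero. The
expected shape, sketched for a hypothetical handler:

        #include <errno.h>

        /* Dequeue contract: 0 on success, negative errno on failure;
         * obj_table is filled only on success. */
        static int
        my_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
        {
                if (objects_available(mp) < n)  /* illustrative helper */
                        return -ENOENT;

                pop_objects(mp, obj_table, n);  /* illustrative helper */
                return 0;
        }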
index e6dace2..614705d 100644 (file)
@@ -735,12 +735,14 @@ void
 rte_sched_port_free(struct rte_sched_port *port)
 {
        uint32_t qindex;
-       uint32_t n_queues_per_port = rte_sched_port_queues_per_port(port);
+       uint32_t n_queues_per_port;
 
        /* Check user parameters */
        if (port == NULL)
                return;
 
+       n_queues_per_port = rte_sched_port_queues_per_port(port);
+
        /* Free enqueued mbufs */
        for (qindex = 0; qindex < n_queues_per_port; qindex++) {
                struct rte_mbuf **mbufs = rte_sched_port_qbase(port, qindex);
index 2d3eeb7..8a075da 100644 (file)
 #include <stdio.h>
 #include <stdlib.h>
 #include <sys/socket.h>
-#include <sys/select.h>
 #include <sys/time.h>
 #include <sys/types.h>
 #include <unistd.h>
+#include <string.h>
 
 #include <rte_common.h>
 #include <rte_log.h>
 
 #include "fd_man.h"
 
-/**
- * Returns the index in the fdset for a given fd.
- * If fd is -1, it means to search for a free entry.
- * @return
- *   index for the fd, or -1 if fd isn't in the fdset.
- */
+#define FDPOLLERR (POLLERR | POLLHUP | POLLNVAL)
+
 static int
-fdset_find_fd(struct fdset *pfdset, int fd)
+get_last_valid_idx(struct fdset *pfdset, int last_valid_idx)
 {
        int i;
 
-       if (pfdset == NULL)
-               return -1;
-
-       for (i = 0; i < MAX_FDS && pfdset->fd[i].fd != fd; i++)
+       for (i = last_valid_idx; i >= 0 && pfdset->fd[i].fd == -1; i--)
                ;
 
-       return i ==  MAX_FDS ? -1 : i;
+       return i;
 }
 
-static int
-fdset_find_free_slot(struct fdset *pfdset)
+static void
+fdset_move(struct fdset *pfdset, int dst, int src)
 {
-       return fdset_find_fd(pfdset, -1);
+       pfdset->fd[dst]    = pfdset->fd[src];
+       pfdset->rwfds[dst] = pfdset->rwfds[src];
 }
 
-static int
-fdset_add_fd(struct fdset  *pfdset, int idx, int fd,
-       fd_cb rcb, fd_cb wcb, void *dat)
+/*
+ * Find deleted fd entries and remove them
+ */
+static void
+fdset_shrink(struct fdset *pfdset)
 {
-       struct fdentry *pfdentry;
+       int i;
+       int last_valid_idx = get_last_valid_idx(pfdset, pfdset->num - 1);
 
-       if (pfdset == NULL || idx >= MAX_FDS || fd >= FD_SETSIZE)
-               return -1;
+       pthread_mutex_lock(&pfdset->fd_mutex);
 
-       pfdentry = &pfdset->fd[idx];
-       pfdentry->fd = fd;
-       pfdentry->rcb = rcb;
-       pfdentry->wcb = wcb;
-       pfdentry->dat = dat;
+       for (i = 0; i < last_valid_idx; i++) {
+               if (pfdset->fd[i].fd != -1)
+                       continue;
 
-       return 0;
+               fdset_move(pfdset, i, last_valid_idx);
+               last_valid_idx = get_last_valid_idx(pfdset, last_valid_idx - 1);
+       }
+       pfdset->num = last_valid_idx + 1;
+
+       pthread_mutex_unlock(&pfdset->fd_mutex);
 }
 
 /**
- * Fill the read/write fd_set with the fds in the fdset.
+ * Returns the index in the fdset for a given fd.
  * @return
- *  the maximum fds filled in the read/write fd_set.
+ *   index for the fd, or -1 if fd isn't in the fdset.
  */
 static int
-fdset_fill(fd_set *rfset, fd_set *wfset, struct fdset *pfdset)
+fdset_find_fd(struct fdset *pfdset, int fd)
 {
-       struct fdentry *pfdentry;
-       int i, maxfds = -1;
-       int num = MAX_FDS;
+       int i;
 
-       if (pfdset == NULL)
-               return -1;
+       for (i = 0; i < pfdset->num && pfdset->fd[i].fd != fd; i++)
+               ;
 
-       for (i = 0; i < num; i++) {
-               pfdentry = &pfdset->fd[i];
-               if (pfdentry->fd != -1) {
-                       int added = 0;
-                       if (pfdentry->rcb && rfset) {
-                               FD_SET(pfdentry->fd, rfset);
-                               added = 1;
-                       }
-                       if (pfdentry->wcb && wfset) {
-                               FD_SET(pfdentry->fd, wfset);
-                               added = 1;
-                       }
-                       if (added)
-                               maxfds = pfdentry->fd < maxfds ?
-                                       maxfds : pfdentry->fd;
-               }
-       }
-       return maxfds;
+       return i == pfdset->num ? -1 : i;
+}
+
+static void
+fdset_add_fd(struct fdset *pfdset, int idx, int fd,
+       fd_cb rcb, fd_cb wcb, void *dat)
+{
+       struct fdentry *pfdentry = &pfdset->fd[idx];
+       struct pollfd *pfd = &pfdset->rwfds[idx];
+
+       pfdentry->fd  = fd;
+       pfdentry->rcb = rcb;
+       pfdentry->wcb = wcb;
+       pfdentry->dat = dat;
+
+       pfd->fd = fd;
+       pfd->events  = rcb ? POLLIN : 0;
+       pfd->events |= wcb ? POLLOUT : 0;
+       pfd->revents = 0;
 }
 
 void
@@ -151,16 +149,13 @@ fdset_add(struct fdset *pfdset, int fd, fd_cb rcb, fd_cb wcb, void *dat)
                return -1;
 
        pthread_mutex_lock(&pfdset->fd_mutex);
-
-       /* Find a free slot in the list. */
-       i = fdset_find_free_slot(pfdset);
-       if (i == -1 || fdset_add_fd(pfdset, i, fd, rcb, wcb, dat) < 0) {
+       i = pfdset->num < MAX_FDS ? pfdset->num++ : -1;
+       if (i == -1) {
                pthread_mutex_unlock(&pfdset->fd_mutex);
                return -2;
        }
 
-       pfdset->num++;
-
+       fdset_add_fd(pfdset, i, fd, rcb, wcb, dat);
        pthread_mutex_unlock(&pfdset->fd_mutex);
 
        return 0;
@@ -189,7 +184,6 @@ fdset_del(struct fdset *pfdset, int fd)
                        pfdset->fd[i].fd = -1;
                        pfdset->fd[i].rcb = pfdset->fd[i].wcb = NULL;
                        pfdset->fd[i].dat = NULL;
-                       pfdset->num--;
                        i = -1;
                }
                pthread_mutex_unlock(&pfdset->fd_mutex);
@@ -198,24 +192,6 @@ fdset_del(struct fdset *pfdset, int fd)
        return dat;
 }
 
-/**
- *  Unregister the fd at the specified slot from the fdset.
- */
-static void
-fdset_del_slot(struct fdset *pfdset, int index)
-{
-       if (pfdset == NULL || index < 0 || index >= MAX_FDS)
-               return;
-
-       pthread_mutex_lock(&pfdset->fd_mutex);
-
-       pfdset->fd[index].fd = -1;
-       pfdset->fd[index].rcb = pfdset->fd[index].wcb = NULL;
-       pfdset->fd[index].dat = NULL;
-       pfdset->num--;
-
-       pthread_mutex_unlock(&pfdset->fd_mutex);
-}
 
 /**
  * This function runs in an infinite blocking loop until there is no fd in
@@ -229,55 +205,64 @@ fdset_del_slot(struct fdset *pfdset, int index)
 void
 fdset_event_dispatch(struct fdset *pfdset)
 {
-       fd_set rfds, wfds;
-       int i, maxfds;
+       int i;
+       struct pollfd *pfd;
        struct fdentry *pfdentry;
-       int num = MAX_FDS;
        fd_cb rcb, wcb;
        void *dat;
-       int fd;
+       int fd, numfds;
        int remove1, remove2;
-       int ret;
+       int need_shrink;
 
        if (pfdset == NULL)
                return;
 
        while (1) {
-               struct timeval tv;
-               tv.tv_sec = 1;
-               tv.tv_usec = 0;
-               FD_ZERO(&rfds);
-               FD_ZERO(&wfds);
-               pthread_mutex_lock(&pfdset->fd_mutex);
-
-               maxfds = fdset_fill(&rfds, &wfds, pfdset);
-
-               pthread_mutex_unlock(&pfdset->fd_mutex);
 
                /*
-                * When select is blocked, other threads might unregister
+                * When poll is blocked, other threads might unregister
                 * listenfds from and register new listenfds into fdset.
-                * When select returns, the entries for listenfds in the fdset
+                * When poll returns, the entries for listenfds in the fdset
                 * might have been updated. A spurious callback invocation
                 * for a newly added listen fd is harmless.
                 */
-               ret = select(maxfds + 1, &rfds, &wfds, NULL, &tv);
-               if (ret <= 0)
-                       continue;
+               pthread_mutex_lock(&pfdset->fd_mutex);
+               numfds = pfdset->num;
+               pthread_mutex_unlock(&pfdset->fd_mutex);
 
-               for (i = 0; i < num; i++) {
-                       remove1 = remove2 = 0;
+               poll(pfdset->rwfds, numfds, 1000 /* millisecs */);
+
+               need_shrink = 0;
+               for (i = 0; i < numfds; i++) {
                        pthread_mutex_lock(&pfdset->fd_mutex);
+
                        pfdentry = &pfdset->fd[i];
                        fd = pfdentry->fd;
+                       pfd = &pfdset->rwfds[i];
+
+                       if (fd < 0) {
+                               need_shrink = 1;
+                               pthread_mutex_unlock(&pfdset->fd_mutex);
+                               continue;
+                       }
+
+                       if (!pfd->revents) {
+                               pthread_mutex_unlock(&pfdset->fd_mutex);
+                               continue;
+                       }
+
+                       remove1 = remove2 = 0;
+
                        rcb = pfdentry->rcb;
                        wcb = pfdentry->wcb;
                        dat = pfdentry->dat;
                        pfdentry->busy = 1;
+
                        pthread_mutex_unlock(&pfdset->fd_mutex);
-                       if (fd >= 0 && FD_ISSET(fd, &rfds) && rcb)
+
+                       if (rcb && pfd->revents & (POLLIN | FDPOLLERR))
                                rcb(fd, dat, &remove1);
-                       if (fd >= 0 && FD_ISSET(fd, &wfds) && wcb)
+                       if (wcb && pfd->revents & (POLLOUT | FDPOLLERR))
                                wcb(fd, dat, &remove2);
                        pfdentry->busy = 0;
                        /*
@@ -292,8 +277,13 @@ fdset_event_dispatch(struct fdset *pfdset)
                         * listen fd in another thread, we couldn't call
                         * fd_set_del.
                         */
-                       if (remove1 || remove2)
-                               fdset_del_slot(pfdset, i);
+                       if (remove1 || remove2) {
+                               pfdentry->fd = -1;
+                               need_shrink = 1;
+                       }
                }
+
+               if (need_shrink)
+                       fdset_shrink(pfdset);
        }
 }
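
Switching from select() to poll() lifts the FD_SETSIZE ceiling (typically
1024) on descriptor values and drops the per-iteration fd_set rebuild:
the pollfd array persists and the kernel rewrites only revents. The core
shape of the new loop, reduced to essentials (locking and callback
bookkeeping omitted):

        #include <poll.h>

        struct pollfd fds[MAX_FDS];
        int i, nfds = 0;    /* snapshot of pfdset->num, taken under fd_mutex */

        poll(fds, nfds, 1000);  /* 1s timeout so new fds are seen promptly */

        for (i = 0; i < nfds; i++) {
                if (fds[i].revents & (POLLIN | FDPOLLERR))
                        /* run the slot's read callback */;
                if (fds[i].revents & (POLLOUT | FDPOLLERR))
                        /* run the slot's write callback */;
        }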
index bd66ed1..d319cac 100644 (file)
@@ -35,6 +35,7 @@
 #define _FD_MAN_H_
 #include <stdint.h>
 #include <pthread.h>
+#include <poll.h>
 
 #define MAX_FDS 1024
 
@@ -49,6 +50,7 @@ struct fdentry {
 };
 
 struct fdset {
+       struct pollfd rwfds[MAX_FDS];
        struct fdentry fd[MAX_FDS];
        pthread_mutex_t fd_mutex;
        int num;        /* current fd number of this fdset */
index 31825b8..e415093 100644 (file)
@@ -250,6 +250,7 @@ vhost_new_device(void)
        if (i == MAX_VHOST_DEVICE) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Failed to find a free slot for new device.\n");
+               rte_free(dev);
                return -1;
        }
 
index 6b83c15..0cb1c67 100644 (file)
@@ -447,14 +447,14 @@ add_guest_pages(struct virtio_net *dev, struct virtio_memory_region *reg,
        reg_size -= size;
 
        while (reg_size > 0) {
+               size = RTE_MIN(reg_size, page_size);
                host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)
                                                  host_user_addr);
-               add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
-                                  page_size);
+               add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);
 
-               host_user_addr  += page_size;
-               guest_phys_addr += page_size;
-               reg_size -= page_size;
+               host_user_addr  += size;
+               guest_phys_addr += size;
+               reg_size -= size;
        }
 }
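
The RTE_MIN clamp matters whenever the remaining region is not a multiple
of the page size: with a 2 MB page size and 5 MB left, the old loop
stepped three full pages (6 MB) and registered 1 MB that lies outside the
region, while the clamped version makes the final step exactly 1 MB.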
 
@@ -567,7 +567,8 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
                reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
                                      mmap_offset;
 
-               add_guest_pages(dev, reg, alignment);
+               if (dev->dequeue_zero_copy)
+                       add_guest_pages(dev, reg, alignment);
 
                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest memory region %u, size: 0x%" PRIx64 "\n"
index 595f67c..337470d 100644 (file)
@@ -195,6 +195,8 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
        struct vring_desc *desc;
        uint64_t desc_addr;
        struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
+       /* Count the descriptors visited, to detect a looped desc chain */
+       uint16_t nr_desc = 1;
 
        desc = &descs[desc_idx];
        desc_addr = gpa_to_vva(dev, desc->addr);
@@ -233,7 +235,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
                                /* Room in vring buffer is not enough */
                                return -1;
                        }
-                       if (unlikely(desc->next >= size))
+                       if (unlikely(desc->next >= size || ++nr_desc > size))
                                return -1;
 
                        desc = &descs[desc->next];
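
nr_desc counts descriptors visited, starting at 1 for the head. A
well-formed chain touches each of the size descriptors at most once, so
++nr_desc > size can only trip when the next pointers form a cycle; the
guard turns what used to be an infinite loop on a malicious or corrupted
chain into a clean -1 return.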
@@ -677,6 +679,7 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
        default:
                m->l3_len = 0;
                *l4_proto = 0;
+               *l4_hdr = NULL;
                break;
        }
 }
@@ -713,7 +716,7 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
                }
        }
 
-       if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+       if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                case VIRTIO_NET_HDR_GSO_TCPV6:
index ba2a476..134c2b4 100644 (file)
@@ -30,7 +30,7 @@
 # OF THE POSSIBILITY OF SUCH DAMAGE.
 
 Name: dpdk
-Version: 16.11
+Version: 16.11.1
 Release: 1
 Packager: packaging@6wind.com
 URL: http://dpdk.org
index f1d374d..fef59c4 100755 (executable)
@@ -328,6 +328,9 @@ def get_crypto_details():
 
     # based on the basic info, get extended text details
     for d in devices.keys():
+        if devices[d]["Class"][0:2] != CRYPTO_BASE_CLASS:
+            continue
+
         # get additional info and add it to existing data
         devices[d] = devices[d].copy()
         devices[d].update(get_pci_device_details(d).items())