New upstream version 18.11-rc4 22/16022/1 upstream/18.11-rc4
author    Luca Boccassi <luca.boccassi@gmail.com>  Mon, 19 Nov 2018 12:59:01 +0000 (12:59 +0000)
committer Luca Boccassi <luca.boccassi@gmail.com>  Mon, 19 Nov 2018 12:59:24 +0000 (12:59 +0000)
Change-Id: I861e1a2f7df210f57f44f1ad56b9ef789a4675e3
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
75 files changed:
MAINTAINERS
app/pdump/main.c
app/test-pmd/cmdline_mtr.c
app/test-pmd/testpmd.c
config/arm/meson.build
config/meson.build
devtools/check-symbol-change.sh
doc/build-sdk-meson.txt
doc/guides/cryptodevs/dpaa2_sec.rst
doc/guides/cryptodevs/dpaa_sec.rst
doc/guides/cryptodevs/octeontx.rst
doc/guides/eventdevs/dpaa.rst
doc/guides/eventdevs/dpaa2.rst
doc/guides/linux_gsg/linux_drivers.rst
doc/guides/linux_gsg/sys_reqs.rst
doc/guides/nics/dpaa.rst
doc/guides/nics/dpaa2.rst
doc/guides/platform/dpaa.rst [new file with mode: 0644]
doc/guides/platform/dpaa2.rst [new file with mode: 0644]
doc/guides/platform/index.rst
doc/guides/platform/octeontx.rst
doc/guides/prog_guide/metrics_lib.rst
doc/guides/rawdevs/dpaa2_cmdif.rst
doc/guides/rawdevs/dpaa2_qdma.rst
doc/guides/rel_notes/release_18_11.rst
doc/guides/sample_app_ug/compiling.rst
doc/guides/sample_app_ug/flow_filtering.rst
doc/guides/sample_app_ug/ip_reassembly.rst
doc/guides/sample_app_ug/ipv4_multicast.rst
doc/guides/sample_app_ug/vm_power_management.rst
drivers/bus/pci/linux/pci_vfio.c
drivers/common/cpt/cpt_ucode.h
drivers/net/avf/avf_rxtx.h
drivers/net/bonding/rte_eth_bond_pmd.c
drivers/net/ena/ena_ethdev.c
drivers/net/i40e/i40e_ethdev.c
drivers/net/ixgbe/base/ixgbe_common.c
drivers/net/mlx4/mlx4_mr.c
drivers/net/mlx4/mlx4_rxtx.h
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_flow.h
drivers/net/mlx5/mlx5_flow_dv.c
drivers/net/mlx5/mlx5_flow_tcf.c
drivers/net/mlx5/mlx5_flow_verbs.c
drivers/net/mlx5/mlx5_mr.c
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_rxtx.h
drivers/net/mlx5/mlx5_rxtx_vec_neon.h
drivers/net/mlx5/mlx5_rxtx_vec_sse.h
drivers/net/mlx5/mlx5_stats.c
drivers/net/mvpp2/mrvl_mtr.c
drivers/net/octeontx/octeontx_ethdev.c
drivers/net/pcap/rte_eth_pcap.c
examples/ipv4_multicast/main.c
examples/vhost_crypto/main.c
lib/librte_eal/common/eal_common_memalloc.c
lib/librte_eal/common/eal_filesystem.h
lib/librte_eal/common/include/rte_version.h
lib/librte_eal/common/malloc_elem.c
lib/librte_eal/linuxapp/eal/eal_dev.c
lib/librte_eal/linuxapp/eal/eal_memalloc.c
lib/librte_eal/linuxapp/eal/eal_memory.c
lib/librte_efd/rte_efd.c
lib/librte_security/Makefile
lib/librte_security/meson.build
lib/librte_security/rte_security.c
lib/librte_security/rte_security.h
lib/librte_security/rte_security_driver.h
lib/librte_security/rte_security_version.map
meson.build
test/test/test_eal_flags.c
test/test/test_interrupts.c
test/test/test_pmd_ring_perf.c
test/test/test_power_kvm_vm.c
usertools/dpdk-devbind.py

index 19353ac..71ba312 100644 (file)
@@ -233,7 +233,7 @@ F: drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
 F: drivers/net/i40e/i40e_rxtx_vec_neon.c
 F: drivers/net/virtio/virtio_rxtx_simple_neon.c
 
-IBM POWER
+IBM POWER (alpha)
 M: Chao Zhu <chaozhu@linux.vnet.ibm.com>
 F: lib/librte_eal/common/arch/ppc_64/
 F: lib/librte_eal/common/include/arch/ppc_64/
@@ -347,7 +347,7 @@ F: lib/librte_cryptodev/
 F: test/test/test_cryptodev*
 F: examples/l2fwd-crypto/
 
-Security API - EXPERIMENTAL
+Security API
 M: Akhil Goyal <akhil.goyal@nxp.com>
 M: Declan Doherty <declan.doherty@intel.com>
 T: git://dpdk.org/next/dpdk-next-crypto
index d96556e..9e86bf6 100644 (file)
@@ -119,8 +119,8 @@ struct pdump_tuples {
 
        /* params for packet dumping */
        enum pdump_by dump_by_type;
-       int rx_vdev_id;
-       int tx_vdev_id;
+       uint16_t rx_vdev_id;
+       uint16_t tx_vdev_id;
        enum pcap_stream rx_vdev_stream_type;
        enum pcap_stream tx_vdev_stream_type;
        bool single_pdump_dev;
@@ -266,7 +266,7 @@ parse_pdump(const char *optarg)
                                &parse_uint_value, &v);
                if (ret < 0)
                        goto free_kvlist;
-               pt->port = (uint8_t) v.val;
+               pt->port = (uint16_t) v.val;
                pt->dump_by_type = PORT_ID;
        } else if (cnt2 == 1) {
                ret = rte_kvargs_process(kvlist, PDUMP_PCI_ARG,
@@ -435,7 +435,7 @@ disable_pdump(struct pdump_tuples *pt)
 }
 
 static inline void
-pdump_rxtx(struct rte_ring *ring, uint8_t vdev_id, struct pdump_stats *stats)
+pdump_rxtx(struct rte_ring *ring, uint16_t vdev_id, struct pdump_stats *stats)
 {
        /* write input packets of port to vdev for pdump */
        struct rte_mbuf *rxtx_bufs[BURST_SIZE];
@@ -462,7 +462,7 @@ pdump_rxtx(struct rte_ring *ring, uint8_t vdev_id, struct pdump_stats *stats)
 }
 
 static void
-free_ring_data(struct rte_ring *ring, uint8_t vdev_id,
+free_ring_data(struct rte_ring *ring, uint16_t vdev_id,
                struct pdump_stats *stats)
 {
        while (rte_ring_count(ring))
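The pdump changes above widen the vdev and port identifiers from uint8_t to
uint16_t, matching the 16-bit port ids ethdev has used since DPDK 17.11. A
minimal sketch of the affected pattern (illustrative only, not the pdump code;
drain_to_vdev and BURST_SIZE are made-up names):

    #include <rte_ring.h>
    #include <rte_mbuf.h>
    #include <rte_ethdev.h>

    #define BURST_SIZE 32

    /* Drain captured packets from a ring and transmit them on a vdev.
     * The port id is uint16_t: a uint8_t would silently truncate port
     * ids above 255, which is what the type change above prevents. */
    static void
    drain_to_vdev(struct rte_ring *ring, uint16_t vdev_id)
    {
            struct rte_mbuf *bufs[BURST_SIZE];
            unsigned int nb_deq, i;
            uint16_t nb_tx;

            nb_deq = rte_ring_dequeue_burst(ring, (void **)bufs,
                            BURST_SIZE, NULL);
            if (nb_deq == 0)
                    return;

            nb_tx = rte_eth_tx_burst(vdev_id, 0, bufs, (uint16_t)nb_deq);

            /* Free whatever the vdev did not accept. */
            for (i = nb_tx; i < nb_deq; i++)
                    rte_pktmbuf_free(bufs[i]);
    }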
index 846de88..c506d87 100644 (file)
@@ -1148,15 +1148,15 @@ static void cmd_set_port_meter_dscp_table_parsed(void *parsed_result,
        }
 
        if (port_id_is_invalid(port_id, ENABLED_WARN))
-               return;
+               goto free_table;
 
        /* Update Meter DSCP Table*/
        ret = rte_mtr_meter_dscp_table_update(port_id, mtr_id,
                dscp_table, &error);
-       if (ret != 0) {
+       if (ret != 0)
                print_err_msg(&error);
-               return;
-       }
+
+free_table:
        free(dscp_table);
 }
 
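The cmdline_mtr.c hunk above fixes a leak: on an invalid port id, or after a
failed DSCP table update, the function used to return without freeing
dscp_table, so every exit now funnels through a single free_table label. A
generic sketch of that cleanup idiom (a hypothetical helper, not the testpmd
code):

    #include <stdlib.h>
    #include <string.h>

    /* Single-exit cleanup: every failure path jumps to one label that
     * frees the buffer, so no early return can leak it. */
    static int
    process_table(const char *input)
    {
            int ret = -1;
            char *table = malloc(64);

            if (table == NULL)
                    return -1;
            if (input == NULL)
                    goto free_table;        /* error path still frees */

            strncpy(table, input, 63);
            table[63] = '\0';
            ret = 0;                        /* success falls through */

    free_table:
            free(table);
            return ret;
    }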
index 9c0edca..4c75587 100644 (file)
@@ -506,7 +506,7 @@ static void check_all_ports_link_status(uint32_t port_mask);
 static int eth_event_callback(portid_t port_id,
                              enum rte_eth_event_type type,
                              void *param, void *ret_param);
-static void eth_dev_event_callback(const char *device_name,
+static void dev_event_callback(const char *device_name,
                                enum rte_dev_event_type type,
                                void *param);
 
@@ -2434,7 +2434,7 @@ pmd_test_exit(void)
                }
 
                ret = rte_dev_event_callback_unregister(NULL,
-                       eth_dev_event_callback, NULL);
+                       dev_event_callback, NULL);
                if (ret < 0) {
                        RTE_LOG(ERR, EAL,
                                "fail to unregister device event callback.\n");
@@ -2516,8 +2516,14 @@ check_all_ports_link_status(uint32_t port_mask)
        }
 }
 
+/*
+ * This callback removes a single port of a device. It is limited in that
+ * it cannot handle removal of multiple ports for one device.
+ * TODO: the device detach invocation is planned to move from the user
+ * side into the EAL, and all PMDs will be converted to free port
+ * resources on ethdev close.
+ */
 static void
-rmv_event_callback(void *arg)
+rmv_port_callback(void *arg)
 {
        int need_to_start = 0;
        int org_no_link_check = no_link_check;
@@ -2565,7 +2571,7 @@ eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
                if (port_id_is_invalid(port_id, DISABLED_WARN))
                        break;
                if (rte_eal_alarm_set(100000,
-                               rmv_event_callback, (void *)(intptr_t)port_id))
+                               rmv_port_callback, (void *)(intptr_t)port_id))
                        fprintf(stderr, "Could not set up deferred device removal\n");
                break;
        default:
@@ -2598,7 +2604,7 @@ register_eth_event_callback(void)
 
 /* This function is used by the interrupt thread */
 static void
-eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type,
+dev_event_callback(const char *device_name, enum rte_dev_event_type type,
                             __rte_unused void *arg)
 {
        uint16_t port_id;
@@ -2612,7 +2618,7 @@ eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type,
 
        switch (type) {
        case RTE_DEV_EVENT_REMOVE:
-               RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
+               RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
                        device_name);
                ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
                if (ret) {
@@ -2620,7 +2626,19 @@ eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type,
                                device_name);
                        return;
                }
-               rmv_event_callback((void *)(intptr_t)port_id);
+               /*
+                * Because the user's callback is invoked from the EAL
+                * interrupt callback, the interrupt callback must return
+                * before it can be unregistered when detaching the device.
+                * So the callback finishes quickly and defers the actual
+                * detach with an alarm. This is a workaround: once device
+                * detaching is moved into the EAL in the future, the
+                * deferred removal can be deleted.
+                */
+               if (rte_eal_alarm_set(100000,
+                               rmv_port_callback, (void *)(intptr_t)port_id))
+                       RTE_LOG(ERR, EAL,
+                               "Could not set up deferred device removal\n");
                break;
        case RTE_DEV_EVENT_ADD:
                RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
@@ -3167,7 +3185,7 @@ main(int argc, char** argv)
                }
 
                ret = rte_dev_event_callback_register(NULL,
-                       eth_dev_event_callback, NULL);
+                       dev_event_callback, NULL);
                if (ret) {
                        RTE_LOG(ERR, EAL,
                                "fail  to register device event callback\n");
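The testpmd hunks above rename the callbacks and, more importantly, stop
detaching a port directly inside the device event callback: the teardown is
scheduled with rte_eal_alarm_set() so the interrupt callback can return first.
A condensed sketch of the pattern (on_dev_event and deferred_remove are
illustrative names, not the testpmd functions):

    #include <stdint.h>
    #include <rte_ethdev.h>
    #include <rte_dev.h>
    #include <rte_alarm.h>
    #include <rte_log.h>

    /* Runs later from the alarm, after the device event callback that
     * scheduled it has already returned. */
    static void
    deferred_remove(void *arg)
    {
            uint16_t port_id = (uint16_t)(intptr_t)arg;

            rte_eth_dev_stop(port_id);
            rte_eth_dev_close(port_id);
    }

    static void
    on_dev_event(const char *name, enum rte_dev_event_type type,
                    __rte_unused void *arg)
    {
            uint16_t port_id;

            if (type != RTE_DEV_EVENT_REMOVE)
                    return;
            if (rte_eth_dev_get_port_by_name(name, &port_id) != 0)
                    return;
            /* The EAL cannot unregister this callback until it returns,
             * so defer the actual teardown by 100 ms. */
            if (rte_eal_alarm_set(100000, deferred_remove,
                            (void *)(intptr_t)port_id))
                    RTE_LOG(ERR, EAL, "cannot defer removal of %s\n", name);
    }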
index 4b23b39..b755138 100644 (file)
@@ -89,6 +89,9 @@ if cc.sizeof('void *') != 8
        dpdk_conf.set('RTE_CACHE_LINE_SIZE', 64)
        dpdk_conf.set('RTE_ARCH_ARM', 1)
        dpdk_conf.set('RTE_ARCH_ARMv7', 1)
+       # the minimum architecture supported, armv7-a, needs the following,
+       # mk/machine/armv7a/rte.vars.mk sets it too
+       machine_args += '-mfpu=neon'
 else
        dpdk_conf.set('RTE_CACHE_LINE_SIZE', 128)
        dpdk_conf.set('RTE_ARCH_ARM64', 1)
index 0b710b7..db32499 100644 (file)
@@ -7,10 +7,32 @@ if meson.is_cross_build()
 else
        machine = get_option('machine')
 endif
+
+# machine type 'default' is special, it defaults to the per arch agreed common
+# minimal baseline needed for DPDK.
+# That might not be the most optimized, but the most portable version while
+# still being able to support the CPU features required for DPDK.
+# This can be bumped up by the DPDK project, but it can never be an
+# invariant like 'native'
+if machine == 'default'
+       if host_machine.cpu_family().startswith('x86')
+               # matches the old pre-meson build systems default
+               machine = 'corei7'
+       elif host_machine.cpu_family().startswith('arm')
+               machine = 'armv7-a'
+       elif host_machine.cpu_family().startswith('aarch')
+               # arm64 manages defaults in config/arm/meson.build
+               machine = 'default'
+       elif host_machine.cpu_family().startswith('ppc')
+               machine = 'power8'
+       endif
+endif
+
 dpdk_conf.set('RTE_MACHINE', machine)
 machine_args = []
-# ppc64 does not support -march=native
-if host_machine.cpu_family().startswith('ppc') and machine == 'native'
+
+# ppc64 does not support -march= at all, use -mcpu and -mtune for that
+if host_machine.cpu_family().startswith('ppc')
        machine_args += '-mcpu=' + machine
        machine_args += '-mtune=' + machine
 else
index c0d2a6d..1d21e91 100755 (executable)
@@ -23,7 +23,7 @@ build_map_changes()
                # does not end in 'map', indicating we have left the map chunk.
                # When we hit this, turn off the in_map variable, which
                # supresses the subordonate rules below
-               /[-+] a\/.*\.^(map)/ {in_map=0}
+               /[-+] a\/.*\.[^map]/ {in_map=0}
 
                # Triggering this rule, which starts a line and ends it
                # with a { identifies a versioned section.  The section name is
@@ -153,7 +153,6 @@ clean_and_exit_on_sig()
 build_map_changes "$patch" "$mapfile"
 check_for_rule_violations "$mapfile"
 exit_code=$?
-
 rm -f "$mapfile"
 
 exit $exit_code
index 508e2cb..a4dd091 100644 (file)
@@ -132,7 +132,7 @@ Installing the Compiled Files
 
 Use ``ninja install`` to install the required DPDK files onto the system.
 The install prefix defaults to ``/usr/local`` but can be used as with other
-options above. The environment variable ``DEST_DIR`` can be used to adjust
+options above. The environment variable ``DESTDIR`` can be used to adjust
 the root directory for the install, for example when packaging.
 
 With the base install directory, the individual directories for libraries
@@ -203,3 +203,9 @@ From examples/helloworld/Makefile::
 
        build:
                @mkdir -p $@
+
+NOTE: for --static builds, DPDK needs to be built with Meson >= 0.46 in order to
+fully generate the list of private dependencies. If DPDK is built with an older
+version of Meson, it might be necessary to manually specify dependencies of DPDK
+PMDs/libraries, for example -lmlx5 -lmnl for librte-pmd-mlx5, or the static link
+step might fail.
index 9191704..aee79ab 100644 (file)
@@ -129,7 +129,7 @@ AEAD algorithms:
 Supported DPAA2 SoCs
 --------------------
 
-* LS2080A/LS2040A
+* LX2160A
 * LS2084A/LS2044A
 * LS2088A/LS2048A
 * LS1088A/LS1048A
@@ -157,31 +157,15 @@ Prerequisites
 DPAA2_SEC driver has similar pre-requisites as described in :ref:`dpaa2_overview`.
 The following dependencies are not part of DPDK and must be installed separately:
 
-* **NXP Linux SDK**
-
-  NXP Linux software development kit (SDK) includes support for the family
-  of QorIQ® ARM-Architecture-based system on chip (SoC) processors
-  and corresponding boards.
-
-  It includes the Linux board support packages (BSPs) for NXP SoCs,
-  a fully operational tool chain, kernel and board specific modules.
-
-  SDK and related information can be obtained from:  `NXP QorIQ SDK  <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
-
-* **DPDK Extra Scripts**
-
-  DPAA2 based resources can be configured easily with the help of ready scripts
-  as provided in the DPDK helper repository.
-
-  `DPDK Extra Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
+See :doc:`../platform/dpaa2` for setup information
 
 Currently supported by DPDK:
 
-* NXP SDK **17.08+**.
-* MC Firmware version **10.3.1** and higher.
-* Supported architectures:  **arm64 LE**.
+- NXP SDK **18.09+**.
+- MC Firmware version **10.10.0** and higher.
+- Supported architectures:  **arm64 LE**.
 
-* Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
+- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
 
 Pre-Installation Configuration
 ------------------------------
index dd68389..897a4fe 100644 (file)
@@ -101,32 +101,11 @@ Prerequisites
 -------------
 
 DPAA_SEC driver has similar pre-requisites as described in :ref:`dpaa_overview`.
-The following dependencies are not part of DPDK and must be installed separately:
 
-* **NXP Linux SDK**
+See :doc:`../platform/dpaa` for setup information
 
-  NXP Linux software development kit (SDK) includes support for the family
-  of QorIQ® ARM-Architecture-based system on chip (SoC) processors
-  and corresponding boards.
 
-  It includes the Linux board support packages (BSPs) for NXP SoCs,
-  a fully operational tool chain, kernel and board specific modules.
-
-  SDK and related information can be obtained from:  `NXP QorIQ SDK  <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
-
-* **DPDK Extras Scripts**
-
-  DPAA based resources can be configured easily with the help of ready scripts
-  as provided in the DPDK Extras repository.
-
-  `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
-
-Currently supported by DPDK:
-
-* NXP SDK **2.0+**.
-* Supported architectures:  **arm64 LE**.
-
-* Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
+- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
 
 Pre-Installation Configuration
 ------------------------------
index 660e980..1600a56 100644 (file)
@@ -53,11 +53,8 @@ AEAD Algorithms
 
 * ``RTE_CRYPTO_AEAD_AES_GCM``
 
-Compilation
------------
-
-The **OCTEON TX** :sup:`®` board must be running the linux kernel based on
-sdk-6.2.0 patch 3. In this, the OCTEON TX crypto PF driver is already built in.
+Config flags
+------------
 
 For compiling the OCTEON TX crypto poll mode driver, please check if the
 CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO setting is set to `y` in
@@ -65,23 +62,21 @@ config/common_base file.
 
 * ``CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO=y``
 
-The following are the steps to compile the OCTEON TX crypto poll mode driver:
+Compilation
+-----------
 
-.. code-block:: console
+The OCTEON TX crypto poll mode driver can be compiled either natively on
+**OCTEON TX** :sup:`®` board or cross-compiled on an x86 based platform.
 
-        cd <dpdk directory>
-        make config T=arm64-thunderx-linuxapp-gcc
-        make
+Refer to :doc:`../platform/octeontx` for details about setting up the platform
+and building DPDK applications.
 
-The example applications can be compiled using the following:
+.. note::
 
-.. code-block:: console
+   OCTEON TX crypto PF driver needs microcode to be available at `/lib/firmware/` directory.
+   Refer to the SDK documents for further information.
 
-        cd <dpdk directory>
-        export RTE_SDK=$PWD
-        export RTE_TARGET=build
-        cd examples/<application>
-        make
+SDK and related information can be obtained from: `Cavium support site <https://support.cavium.com/>`_.
 
 Execution
 ---------
index 2f356d3..cfc4034 100644 (file)
@@ -25,57 +25,17 @@ The DPAA EVENTDEV implements many features in the eventdev API;
 Supported DPAA SoCs
 --------------------
 
-- LS1046A
-- LS1043A
+- LS1046A/LS1026A
+- LS1043A/LS1023A
 
 Prerequisites
 -------------
 
-There are following pre-requisites for executing EVENTDEV on a DPAA compatible
-platform:
-
-1. **ARM 64 Tool Chain**
-
-  For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/6.4-2017.08/aarch64-linux-gnu/>`_.
-
-2. **Linux Kernel**
-
-   It can be obtained from `NXP's Github hosting <https://github.com/qoriq-open-source/linux>`_.
-
-3. **Rootfile System**
-
-   Any *aarch64* supporting filesystem can be used. For example,
-   Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
-   from `here <http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
-
-As an alternative method, DPAA EVENTDEV can also be executed using images provided
-as part of SDK from NXP. The SDK includes all the above prerequisites necessary
-to bring up a DPAA board.
-
-The following dependencies are not part of DPDK and must be installed
-separately:
-
-- **NXP Linux SDK**
-
-  NXP Linux software development kit (SDK) includes support for family
-  of QorIQ® ARM-Architecture-based system on chip (SoC) processors
-  and corresponding boards.
-
-  It includes the Linux board support packages (BSPs) for NXP SoCs,
-  a fully operational tool chain, kernel and board specific modules.
-
-  SDK and related information can be obtained from:  `NXP QorIQ SDK  <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
-
-- **DPDK Extra Scripts**
-
-  DPAA based resources can be configured easily with the help of ready to use
-  xml files as provided in the DPDK Extra repository.
-
-  `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
+See :doc:`../platform/dpaa` for setup information
 
 Currently supported by DPDK:
 
-- NXP SDK **2.0+** or LSDK **17.09+**
+- NXP SDK **2.0+** or LSDK **18.09+**
 - Supported architectures:  **arm64 LE**.
 
 - Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
index ad94f24..2b1700a 100644 (file)
@@ -26,7 +26,7 @@ The DPAA2 EVENTDEV implements many features in the eventdev API;
 Supported DPAA2 SoCs
 --------------------
 
-- LS2080A/LS2040A
+- LX2160A
 - LS2084A/LS2044A
 - LS2088A/LS2048A
 - LS1088A/LS1048A
@@ -34,52 +34,12 @@ Supported DPAA2 SoCs
 Prerequisites
 -------------
 
-There are three main pre-requisities for executing DPAA2 EVENTDEV on a DPAA2
-compatible board:
-
-1. **ARM 64 Tool Chain**
-
-   For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/4.9-2017.01/aarch64-linux-gnu>`_.
-
-2. **Linux Kernel**
-
-   It can be obtained from `NXP's Github hosting <https://github.com/qoriq-open-source/linux>`_.
-
-3. **Rootfile system**
-
-   Any *aarch64* supporting filesystem can be used. For example,
-   Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
-   from `here <http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
-
-As an alternative method, DPAA2 EVENTDEV can also be executed using images provided
-as part of SDK from NXP. The SDK includes all the above prerequisites necessary
-to bring up a DPAA2 board.
-
-The following dependencies are not part of DPDK and must be installed
-separately:
-
-- **NXP Linux SDK**
-
-  NXP Linux software development kit (SDK) includes support for family
-  of QorIQ® ARM-Architecture-based system on chip (SoC) processors
-  and corresponding boards.
-
-  It includes the Linux board support packages (BSPs) for NXP SoCs,
-  a fully operational tool chain, kernel and board specific modules.
-
-  SDK and related information can be obtained from:  `NXP QorIQ SDK  <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
-
-- **DPDK Extra Scripts**
-
-  DPAA2 based resources can be configured easily with the help of ready scripts
-  as provided in the DPDK Extra repository.
-
-  `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
+See :doc:`../platform/dpaa2` for setup information
 
 Currently supported by DPDK:
 
-- NXP SDK **2.0+**.
-- MC Firmware version **10.0.0** and higher.
+- NXP SDK **18.09+**.
+- MC Firmware version **10.10.0** and higher.
 - Supported architectures:  **arm64 LE**.
 
 - Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
@@ -155,4 +115,4 @@ DPAA2 drivers for DPDK can only work on NXP SoCs as listed in the
 Port-core binding
 ~~~~~~~~~~~~~~~~~
 
-DPAA2 EVENTDEV driver requires event port 'x' to be used on core 'x'.
+DPAA2 EVENTDEV can support only one eventport per core.
index 371a817..8da6a31 100644 (file)
@@ -48,6 +48,13 @@ be loaded as shown below:
    ``vfio-pci`` kernel module rather than ``igb_uio`` or ``uio_pci_generic``.
    For more details see :ref:`linux_gsg_binding_kernel` below.
 
+.. note::
+
+   If the devices used for DPDK are bound to the ``uio_pci_generic`` kernel module,
+   please make sure that the IOMMU is disabled or in passthrough mode. One can add
+   ``intel_iommu=off`` or ``amd_iommu=off`` or ``intel_iommu=on iommu=pt`` in the
+   GRUB command line on x86_64 systems, or add ``iommu.passthrough=1`` on arm64
+   systems.
+
 Since DPDK release 1.7 onward provides VFIO support, use of UIO is optional
 for platforms that support using VFIO.
 
index e2230f3..29c5f47 100644 (file)
@@ -64,7 +64,11 @@ Compilation of the DPDK
        x86_x32 ABI is currently supported with distribution packages only on Ubuntu
        higher than 13.10 or recent Debian distribution. The only supported  compiler is gcc 4.9+.
 
-*   libnuma-devel - library for handling NUMA (Non Uniform Memory Access).
+*   Library for handling NUMA (Non Uniform Memory Access).
+
+    * numactl-devel in Red Hat/Fedora;
+
+    * libnuma-dev in Debian/Ubuntu;
 
 *   Python, version 2.7+ or 3.2+, to use various helper scripts included in the DPDK package.
 
@@ -103,6 +107,13 @@ System Software
 
         uname -r
 
+.. note::
+
+    Kernel version 3.2 is no longer a kernel.org longterm stable kernel.
+    For DPDK 19.02 the minimum required kernel will be updated to
+    the current kernel.org oldest longterm stable supported kernel 3.16,
+    or recent versions of common distributions, notably RHEL/CentOS 7.
+
 *   glibc >= 2.7 (for features related to cpuset)
 
     The version can be checked using the ``ldd --version`` command.
@@ -183,12 +194,6 @@ On a NUMA machine, pages should be allocated explicitly on separate nodes::
 
     For 1G pages, it is not possible to reserve the hugepage memory after the system has booted.
 
-    On IBM POWER system, the nr_overcommit_hugepages should be set to the same value as nr_hugepages.
-    For example, if the required page number is 128, the following commands are used::
-
-        echo 128 > /sys/kernel/mm/hugepages/hugepages-16384kB/nr_hugepages
-        echo 128 > /sys/kernel/mm/hugepages/hugepages-16384kB/nr_overcommit_hugepages
-
 Using Hugepages with the DPDK
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
index 620c045..2173673 100644 (file)
@@ -181,65 +181,8 @@ Supported DPAA SoCs
 Prerequisites
 -------------
 
-There are three main pre-requisities for executing DPAA PMD on a DPAA
-compatible board:
+See :doc:`../platform/dpaa` for setup information
 
-1. **ARM 64 Tool Chain**
-
-   For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/6.4-2017.08/aarch64-linux-gnu/>`_.
-
-2. **Linux Kernel**
-
-   It can be obtained from `NXP's Github hosting <https://github.com/qoriq-open-source/linux>`_.
-
-3. **Rootfile system**
-
-   Any *aarch64* supporting filesystem can be used. For example,
-   Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
-   from `here <http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
-
-4. **FMC Tool**
-
-   Before any DPDK application can be executed, the Frame Manager Configuration
-   Tool (FMC) need to be executed to set the configurations of the queues. This
-   includes the queue state, RSS and other policies.
-   This tool can be obtained from `NXP (Freescale) Public Git Repository <https://github.com/qoriq-open-source/fmc>`_.
-
-   This tool needs configuration files which are available in the
-   :ref:`DPDK Extra Scripts <extra_scripts>`, described below for DPDK usages.
-
-As an alternative method, DPAA PMD can also be executed using images provided
-as part of SDK from NXP. The SDK includes all the above prerequisites necessary
-to bring up a DPAA board.
-
-The following dependencies are not part of DPDK and must be installed
-separately:
-
-- **NXP Linux SDK**
-
-  NXP Linux software development kit (SDK) includes support for family
-  of QorIQ® ARM-Architecture-based system on chip (SoC) processors
-  and corresponding boards.
-
-  It includes the Linux board support packages (BSPs) for NXP SoCs,
-  a fully operational tool chain, kernel and board specific modules.
-
-  SDK and related information can be obtained from:  `NXP QorIQ SDK  <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
-
-
-.. _extra_scripts:
-
-- **DPDK Extra Scripts**
-
-  DPAA based resources can be configured easily with the help of ready scripts
-  as provided in the DPDK Extra repository.
-
-  `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
-
-Currently supported by DPDK:
-
-- NXP SDK **2.0+**.
-- Supported architectures:  **arm64 LE**.
 
 - Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>`
   to setup the basic DPDK environment.
index e2f385d..769dc4e 100644 (file)
@@ -409,8 +409,7 @@ Features of the DPAA2 PMD are:
 
 Supported DPAA2 SoCs
 --------------------
-
-- LS2080A/LS2040A
+- LX2160A
 - LS2084A/LS2044A
 - LS2088A/LS2048A
 - LS1088A/LS1048A
@@ -418,52 +417,12 @@ Supported DPAA2 SoCs
 Prerequisites
 -------------
 
-There are three main pre-requisities for executing DPAA2 PMD on a DPAA2
-compatible board:
-
-1. **ARM 64 Tool Chain**
-
-   For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/6.4-2017.08/aarch64-linux-gnu/>`_.
-
-2. **Linux Kernel**
-
-   It can be obtained from `NXP's Github hosting <https://github.com/qoriq-open-source/linux>`_.
-
-3. **Rootfile system**
-
-   Any *aarch64* supporting filesystem can be used. For example,
-   Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
-   from `here <http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
-
-As an alternative method, DPAA2 PMD can also be executed using images provided
-as part of SDK from NXP. The SDK includes all the above prerequisites necessary
-to bring up a DPAA2 board.
-
-The following dependencies are not part of DPDK and must be installed
-separately:
-
-- **NXP Linux SDK**
-
-  NXP Linux software development kit (SDK) includes support for family
-  of QorIQ® ARM-Architecture-based system on chip (SoC) processors
-  and corresponding boards.
-
-  It includes the Linux board support packages (BSPs) for NXP SoCs,
-  a fully operational tool chain, kernel and board specific modules.
-
-  SDK and related information can be obtained from:  `NXP QorIQ SDK  <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
-
-- **DPDK Extra Scripts**
-
-  DPAA2 based resources can be configured easily with the help of ready scripts
-  as provided in the DPDK Extra repository.
-
-  `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
+See :doc:`../platform/dpaa2` for setup information
 
 Currently supported by DPDK:
 
-- NXP SDK **17.08+**.
-- MC Firmware version **10.3.1** and higher.
+- NXP SDK **18.09+**.
+- MC Firmware version **10.10.0** and higher.
 - Supported architectures:  **arm64 LE**.
 
 - Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
diff --git a/doc/guides/platform/dpaa.rst b/doc/guides/platform/dpaa.rst
new file mode 100644 (file)
index 0000000..3904871
--- /dev/null
@@ -0,0 +1,103 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright 2018 NXP
+
+NXP QorIQ DPAA Board Support Package
+====================================
+
+This document describes the steps to set up the QorIQ DPAA based
+Layerscape platforms, and gives information about the common offload
+HW block drivers of the **NXP QorIQ DPAA** SoC family.
+
+Supported DPAA SoCs
+--------------------
+
+* LS1046A/LS1026A
+* LS1043A/LS1023A
+
+More information about SoC can be found at `NXP Official Website
+<https://www.nxp.com/products/processors-and-microcontrollers/arm-based-
+processors-and-mcus/qoriq-layerscape-arm-processors:QORIQ-ARM>`_.
+
+
+Common Offload HW Block Drivers
+-------------------------------
+
+1. **Nics Driver**
+
+   See :doc:`../nics/dpaa` for NXP dpaa nic driver information.
+
+2. **Cryptodev Driver**
+
+   See :doc:`../cryptodevs/dpaa_sec` for NXP dpaa cryptodev driver information.
+
+3. **Eventdev Driver**
+
+   See :doc:`../eventdevs/dpaa` for NXP dpaa eventdev driver information.
+
+
+Steps To Setup Platform
+-----------------------
+
+There are four main pre-requisites for executing DPAA PMD on a DPAA
+compatible board:
+
+1. **ARM 64 Tool Chain**
+
+   For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/7.3-2018.05/aarch64-linux-gnu/gcc-linaro-7.3.1-2018.05-i686_aarch64-linux-gnu.tar.xz>`_.
+
+2. **Linux Kernel**
+
+   It can be obtained from `NXP's Github hosting <https://source.codeaurora.org/external/qoriq/qoriq-components/linux>`_.
+
+3. **Rootfile system**
+
+   Any *aarch64* supporting filesystem can be used. For example,
+   Ubuntu 16.04 LTS (Xenial) or 18.04 (Bionic) userland which can be obtained
+   from `here
+   <http://cdimage.ubuntu.com/ubuntu-base/releases/18.04/release/ubuntu-base-18.04.1-base-arm64.tar.gz>`_.
+
+4. **FMC Tool**
+
+   Before any DPDK application can be executed, the Frame Manager Configuration
+   Tool (FMC) need to be executed to set the configurations of the queues. This
+   includes the queue state, RSS and other policies.
+   This tool can be obtained from `NXP (Freescale) Public Git Repository <https://source.codeaurora.org/external/qoriq/qoriq-components/fmc>`_.
+
+   This tool needs configuration files which are available in the
+   :ref:`DPDK Extra Scripts <extra_scripts>`, described below for DPDK usages.
+
+As an alternative method, DPAA PMD can also be executed using images provided
+as part of SDK from NXP. The SDK includes all the above prerequisites necessary
+to bring up a DPAA board.
+
+The following dependencies are not part of DPDK and must be installed
+separately:
+
+- **NXP Linux SDK**
+
+  NXP Linux software development kit (SDK) includes support for family
+  of QorIQ® ARM-Architecture-based system on chip (SoC) processors
+  and corresponding boards.
+
+  It includes the Linux board support packages (BSPs) for NXP SoCs,
+  a fully operational tool chain, kernel and board specific modules.
+
+  SDK and related information can be obtained from:  `NXP QorIQ SDK  <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
+
+
+.. _extra_scripts:
+
+- **DPDK Extra Scripts**
+
+  DPAA based resources can be configured easily with the help of ready scripts
+  as provided in the DPDK Extra repository.
+
+  `DPDK Extras Scripts <https://source.codeaurora.org/external/qoriq/qoriq-components/dpdk-extras>`_.
+
+Currently supported by DPDK:
+
+- NXP SDK **2.0+** (preferred: LSDK 18.09).
+- Supported architectures:  **arm64 LE**.
+
+- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>`
+  to setup the basic DPDK environment.
diff --git a/doc/guides/platform/dpaa2.rst b/doc/guides/platform/dpaa2.rst
new file mode 100644 (file)
index 0000000..5a64406
--- /dev/null
@@ -0,0 +1,109 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright 2018 NXP
+
+NXP QorIQ DPAA2 Board Support Package
+=====================================
+
+This document describes the steps to set up the NXP QorIQ DPAA2 platform,
+and gives information about the common offload HW block drivers of the
+**NXP QorIQ DPAA2** SoC family.
+
+Supported DPAA2 SoCs
+--------------------
+
+- LX2160A
+- LS2084A/LS2044A
+- LS2088A/LS2048A
+- LS1088A/LS1048A
+
+More information about SoC can be found at `NXP Official Website
+<https://www.nxp.com/products/processors-and-microcontrollers/arm-based-
+processors-and-mcus/qoriq-layerscape-arm-processors:QORIQ-ARM>`_.
+
+
+Common Offload HW Block Drivers
+-------------------------------
+
+1. **Nics Driver**
+
+   See :doc:`../nics/dpaa2` for NXP dpaa2 nic driver information.
+
+2. **Cryptodev Driver**
+
+   See :doc:`../cryptodevs/dpaa2_sec` for NXP dpaa2 cryptodev driver information.
+
+3. **Eventdev Driver**
+
+   See :doc:`../eventdevs/dpaa2` for NXP dpaa2 eventdev driver information.
+
+4. **Rawdev AIOP CMDIF Driver**
+
+   See :doc:`../rawdevs/dpaa2_cmdif` for NXP dpaa2 AIOP command interface driver information.
+
+5. **Rawdev QDMA Driver**
+
+   See :doc:`../rawdevs/dpaa2_qdma` for NXP dpaa2 QDMA driver information.
+
+
+Steps To Setup Platform
+-----------------------
+
+There are four main pre-requisites for executing DPAA2 PMD on a DPAA2
+compatible board:
+
+1. **ARM 64 Tool Chain**
+
+   For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/7.3-2018.05/aarch64-linux-gnu/gcc-linaro-7.3.1-2018.05-i686_aarch64-linux-gnu.tar.xz>`_.
+
+2. **Linux Kernel**
+
+   It can be obtained from `NXP's Github hosting <https://source.codeaurora.org/external/qoriq/qoriq-components/linux>`_.
+
+3. **Rootfile system**
+
+   Any *aarch64* supporting filesystem can be used. For example,
+   Ubuntu 16.04 LTS (Xenial) or 18.04 (Bionic) userland which can be obtained
+   from `here
+   <http://cdimage.ubuntu.com/ubuntu-base/releases/18.04/release/ubuntu-base-18.04.1-base-arm64.tar.gz>`_.
+
+4. **Resource Scripts**
+
+   DPAA2 based resources can be configured easily with the help of ready scripts
+   as provided in the DPDK Extra repository.
+
+As an alternative method, DPAA2 PMD can also be executed using images provided
+as part of SDK from NXP. The SDK includes all the above prerequisites necessary
+to bring up a DPAA2 board.
+
+The following dependencies are not part of DPDK and must be installed
+separately:
+
+- **NXP Linux SDK**
+
+  NXP Linux software development kit (SDK) includes support for family
+  of QorIQ® ARM-Architecture-based system on chip (SoC) processors
+  and corresponding boards.
+
+  It includes the Linux board support packages (BSPs) for NXP SoCs,
+  a fully operational tool chain, kernel and board specific modules.
+
+  SDK and related information can be obtained from:  `NXP QorIQ SDK  <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
+
+
+.. _extra_scripts:
+
+- **DPDK Extra Scripts**
+
+  DPAA2 based resources can be configured easily with the help of ready scripts
+  as provided in the DPDK Extra repository.
+
+  `DPDK Extras Scripts <https://source.codeaurora.org/external/qoriq/qoriq-components/dpdk-extras>`_.
+
+Currently supported by DPDK:
+
+- NXP SDK **2.0+** (preferred: LSDK 18.09).
+- MC Firmware version **10.10.0** and higher.
+- Supported architectures:  **arm64 LE**.
+
+- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>`
+  to setup the basic DPDK environment.
index ca6113d..98c6638 100644 (file)
@@ -10,4 +10,6 @@ The following are platform specific guides and setup information.
     :maxdepth: 2
     :numbered:
 
+    dpaa
+    dpaa2
     octeontx
index 9f75d2a..3bde91f 100644 (file)
@@ -15,11 +15,15 @@ More information about SoC can be found at `Cavium, Inc Official Website
 Common Offload HW Block Drivers
 -------------------------------
 
-1. **Eventdev Driver**
+1. **Crypto Driver**
+   See :doc:`../cryptodevs/octeontx` for octeontx crypto driver
+   information.
+
+2. **Eventdev Driver**
    See :doc:`../eventdevs/octeontx` for octeontx ssovf eventdev driver
    information.
 
-2. **Mempool Driver**
+3. **Mempool Driver**
    See :doc:`../mempool/octeontx` for octeontx fpavf mempool driver
    information.
 
@@ -35,6 +39,12 @@ OCTEON TX compatible board:
    Platform drivers) are available on Github at `octeontx-kmod <https://github.com/caviumnetworks/octeontx-kmod>`_
    along with build, install and dpdk usage instructions.
 
+.. note::
+
+   The PF driver and the required microcode for the crypto offload block will be
+   available with OCTEON TX SDK only. So for using crypto offload, follow the steps
+   mentioned in :ref:`setup_platform_using_OCTEON_TX_SDK`.
+
 2. **ARM64 Tool Chain**
 
    For example, the *aarch64* Linaro Toolchain, which can be obtained from
@@ -48,8 +58,104 @@ OCTEON TX compatible board:
 
    As an alternative method, Platform drivers can also be executed using images provided
    as part of SDK from Cavium. The SDK includes all the above prerequisites necessary
-   to bring up a OCTEON TX board.
-
-   SDK and related information can be obtained from: `Cavium support site <https://support.cavium.com/>`_.
+   to bring up a OCTEON TX board. Please refer :ref:`setup_platform_using_OCTEON_TX_SDK`.
 
 - Follow the DPDK :doc:`../linux_gsg/index` to setup the basic DPDK environment.
+
+.. _setup_platform_using_OCTEON_TX_SDK:
+
+Setup Platform Using OCTEON TX SDK
+----------------------------------
+
+The OCTEON TX platform drivers can be compiled either natively on
+**OCTEON TX** :sup:`®` board or cross-compiled on an x86 based platform.
+
+The **OCTEON TX** :sup:`®` board must be running the linux kernel based on
+OCTEON TX SDK 6.2.0 patch 3. In this, the PF drivers for all hardware
+offload blocks are already built in.
+
+Native Compilation
+~~~~~~~~~~~~~~~~~~
+
+If the kernel and modules are cross-compiled and copied to the target board,
+some intermediate binaries required for native build would be missing on the
+target board. To make sure all the required binaries are available in the
+native architecture, the linux sources need to be compiled once natively.
+
+.. code-block:: console
+
+        cd /lib/modules/$(uname -r)/source
+        make menuconfig
+        make
+
+The above steps would rebuild the modules and the required intermediate binaries.
+Once the target is ready for native compilation, the OCTEON TX platform
+drivers can be compiled with the following steps,
+
+.. code-block:: console
+
+        cd <dpdk directory>
+        make config T=arm64-thunderx-linuxapp-gcc
+        make
+
+The example applications can be compiled using the following:
+
+.. code-block:: console
+
+        cd <dpdk directory>
+        export RTE_SDK=$PWD
+        export RTE_TARGET=build
+        cd examples/<application>
+        make
+
+Cross Compilation
+~~~~~~~~~~~~~~~~~
+
+The DPDK applications can be cross-compiled on any x86 based platform. The
+OCTEON TX SDK need to be installed on the build system. The SDK package will
+provide the required toolchain etc.
+
+Refer to :doc:`../linux_gsg/cross_build_dpdk_for_arm64` for further steps on
+compilation. The 'host' & 'CC' to be used in the commands would change,
+in addition to the paths to which libnuma related files have to be
+copied.
+
+The following steps can be used to perform cross-compilation with OCTEON TX
+SDK 6.2.0 patch 3:
+
+.. code-block:: console
+
+        cd <sdk_install_dir>
+        source env-setup
+
+        git clone https://github.com/numactl/numactl.git
+        cd numactl
+        git checkout v2.0.11 -b v2.0.11
+        ./autogen.sh
+        autoconf -i
+        ./configure --host=aarch64-thunderx-linux CC=aarch64-thunderx-linux-gnu-gcc --prefix=<numa install dir>
+        make install
+
+The above steps will prepare build system with numa additions. Now this build system can be used
+to build applications for **OCTEON TX** :sup:`®` platforms.
+
+.. code-block:: console
+
+        cd <dpdk directory>
+        export RTE_SDK=$PWD
+        export RTE_KERNELDIR=$THUNDER_ROOT/linux/kernel/linux
+        make config T=arm64-thunderx-linuxapp-gcc
+        make -j CROSS=aarch64-thunderx-linux-gnu- CONFIG_RTE_KNI_KMOD=n CONFIG_RTE_EAL_IGB_UIO=n EXTRA_CFLAGS="-isystem <numa_install_dir>/include" EXTRA_LDFLAGS="-L<numa_install_dir>/lib -lnuma"
+
+If NUMA support is not required, it can be disabled as explained in
+:doc:`../linux_gsg/cross_build_dpdk_for_arm64`.
+
+Following steps could be used in that case.
+
+.. code-block:: console
+
+        make config T=arm64-thunderx-linuxapp-gcc
+        make CROSS=aarch64-thunderx-linux-gnu-
+
+
+SDK and related information can be obtained from: `Cavium support site <https://support.cavium.com/>`_.
index 5cbe17c..e68e4e7 100644 (file)
@@ -271,3 +271,12 @@ de-initialise the latency library.
 .. code-block:: c
 
     rte_latencystats_uninit();
+
+Timestamp and latency calculation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Latency stats library marks the time in the timestamp field of the
+mbuf for the ingress packets and sets the ``PKT_RX_TIMESTAMP`` flag of
+``ol_flags`` for the mbuf to indicate the marked time as a valid one.
+At the egress, the mbufs with the flag set are considered having valid
+timestamp and are used for the latency calculation.
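A sketch of the egress-side check the new paragraph above describes, using the
standard mbuf fields (latency_cycles is an illustrative helper, not part of
the library API, and the library's internal callback differs in detail):

    #include <rte_mbuf.h>
    #include <rte_cycles.h>

    /* Only mbufs carrying PKT_RX_TIMESTAMP hold a timestamp written at
     * ingress; only those contribute to the latency figure. */
    static uint64_t
    latency_cycles(const struct rte_mbuf *m)
    {
            if ((m->ol_flags & PKT_RX_TIMESTAMP) == 0)
                    return 0;       /* no valid ingress timestamp */
            return rte_rdtsc() - m->timestamp;
    }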
index 20a6099..bebda83 100644 (file)
@@ -29,52 +29,12 @@ Supported DPAA2 SoCs
 Prerequisites
 -------------
 
-There are three main pre-requisities for executing DPAA2 CMDIF on a DPAA2
-compatible board:
-
-1. **ARM 64 Tool Chain**
-
-   For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/6.3-2017.02/aarch64-linux-gnu>`_.
-
-2. **Linux Kernel**
-
-   It can be obtained from `NXP's Github hosting <https://github.com/qoriq-open-source/linux>`_.
-
-3. **Rootfile system**
-
-   Any *aarch64* supporting filesystem can be used. For example,
-   Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
-   from `here <http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
-
-As an alternative method, DPAA2 CMDIF can also be executed using images provided
-as part of SDK from NXP. The SDK includes all the above prerequisites necessary
-to bring up a DPAA2 board.
-
-The following dependencies are not part of DPDK and must be installed
-separately:
-
-- **NXP Linux SDK**
-
-  NXP Linux software development kit (SDK) includes support for family
-  of QorIQ® ARM-Architecture-based system on chip (SoC) processors
-  and corresponding boards.
-
-  It includes the Linux board support packages (BSPs) for NXP SoCs,
-  a fully operational tool chain, kernel and board specific modules.
-
-  SDK and related information can be obtained from:  `NXP QorIQ SDK  <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
-
-- **DPDK Extra Scripts**
-
-  DPAA2 based resources can be configured easily with the help of ready scripts
-  as provided in the DPDK Extra repository.
-
-  `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
+See :doc:`../platform/dpaa2` for setup information
 
 Currently supported by DPDK:
 
-- NXP SDK **2.0+**.
-- MC Firmware version **10.0.0** and higher.
+- NXP SDK **18.09+**.
+- MC Firmware version **10.10.0** and higher.
 - Supported architectures:  **arm64 LE**.
 
 - Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
index b9bc4ec..793a851 100644 (file)
@@ -25,6 +25,7 @@ The DPAA2 QDMA implements following features in the rawdev API;
 Supported DPAA2 SoCs
 --------------------
 
+- LX2160A
 - LS2084A/LS2044A
 - LS2088A/LS2048A
 - LS1088A/LS1048A
@@ -32,52 +33,12 @@ Supported DPAA2 SoCs
 Prerequisites
 -------------
 
-There are three main pre-requisities for executing DPAA2 QDMA on a DPAA2
-compatible board:
-
-1. **ARM 64 Tool Chain**
-
-   For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/6.3-2017.02/aarch64-linux-gnu>`_.
-
-2. **Linux Kernel**
-
-   It can be obtained from `NXP's Github hosting <https://github.com/qoriq-open-source/linux>`_.
-
-3. **Rootfile system**
-
-   Any *aarch64* supporting filesystem can be used. For example,
-   Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
-   from `here <http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
-
-As an alternative method, DPAA2 QDMA can also be executed using images provided
-as part of SDK from NXP. The SDK includes all the above prerequisites necessary
-to bring up a DPAA2 board.
-
-The following dependencies are not part of DPDK and must be installed
-separately:
-
-- **NXP Linux SDK**
-
-  NXP Linux software development kit (SDK) includes support for family
-  of QorIQ® ARM-Architecture-based system on chip (SoC) processors
-  and corresponding boards.
-
-  It includes the Linux board support packages (BSPs) for NXP SoCs,
-  a fully operational tool chain, kernel and board specific modules.
-
-  SDK and related information can be obtained from:  `NXP QorIQ SDK  <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
-
-- **DPDK Extra Scripts**
-
-  DPAA2 based resources can be configured easily with the help of ready scripts
-  as provided in the DPDK Extra repository.
-
-  `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
+See :doc:`../platform/dpaa2` for setup information
 
 Currently supported by DPDK:
 
-- NXP LSDK **17.12+**.
-- MC Firmware version **10.3.0** and higher.
+- NXP SDK **18.09+**.
+- MC Firmware version **10.10.0** and higher.
 - Supported architectures:  **arm64 LE**.
 
 - Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
index 51d0075..32ff0e5 100644 (file)
@@ -279,6 +279,25 @@ New Features
   their telemetry via a UNIX socket in JSON. The JSON can be consumed by any
   Service Assurance agent, such as CollectD.
 
+* **Updated KNI kernel module, rte_kni library, and KNI sample application.**
+
+  Updated the KNI kernel module with a new kernel module parameter,
+  ``carrier=[on|off]`` to allow the user to control the default carrier
+  state of KNI kernel network interfaces.  The default carrier state
+  is now set to ``off``, so the interfaces cannot be used until the
+  carrier state is set to ``on`` via ``rte_kni_update_link`` or
+  by writing ``1`` to ``/sys/devices/virtual/net/<iface>/carrier``.
+  In previous versions the default carrier state was left undefined.
+  See :doc:`../prog_guide/kernel_nic_interface` for more information.
+
+  Added the new API function ``rte_kni_update_link`` to allow the user
+  to set the carrier state of the KNI kernel network interface.
+
+  Added a new command line flag ``-m`` to the KNI sample application to
+  monitor and automatically reflect the physical NIC carrier state to the
+  KNI kernel network interface with the new ``rte_kni_update_link`` API.
+  See :doc:`../sample_app_ug/kernel_nic_interface` for more information.
+
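A minimal sketch (editorial, not part of this patch) of driving the new
carrier control from application code; it assumes ``kni`` was created
earlier with ``rte_kni_alloc()`` and mirrors the physical port's link
state into the KNI interface:

.. code-block:: c

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_kni.h>

    static void
    sync_kni_carrier(uint16_t port_id, struct rte_kni *kni)
    {
        struct rte_eth_link link;

        /* Read the physical NIC link state without blocking. */
        rte_eth_link_get_nowait(port_id, &link);
        /* Propagate it to the KNI kernel network interface. */
        if (rte_kni_update_link(kni, link.link_status) < 0)
            printf("cannot update KNI carrier state\n");
    }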
 * **Added ability to switch queue deferred start flag on testpmd app.**
 
   Added a console command to testpmd app, giving ability to switch
@@ -385,6 +404,20 @@ API Changes
 * eventdev: Type of 2nd parameter to ``rte_event_eth_rx_adapter_caps_get()``
   has been changed from uint8_t to uint16_t.
 
+* kni: By default, the interface carrier status is ``off``, which means there
+  won't be any traffic. It can be set to ``on`` via the
+  ``rte_kni_update_link()`` API or via the ``sysfs`` interface:
+  ``echo 1 > /sys/class/net/vEth0/carrier``.
+  Note that the interface must be ``up`` before the sysfs carrier file can be
+  read or written. When the KNI sample application is used, its ``-m``
+  parameter can be used to automatically update the carrier status for the
+  interface.
+
+* kni: When ethtool support is enabled (``CONFIG_RTE_KNI_KMOD_ETHTOOL=y``),
+  the ethtool commands ``ETHTOOL_GSET & ETHTOOL_SSET`` are no longer supported
+  on kernels that have ``ETHTOOL_GLINKSETTINGS & ETHTOOL_SLINKSETTINGS``
+  support. This means ``ethtool "-a|--show-pause", "-s|--change"`` won't work,
+  and the ``ethtool <iface>`` output will contain less information.
+
 
 ABI Changes
 -----------
@@ -532,6 +565,8 @@ Known Issues
   driver; the Linux netvsc device must be brought up before the netvsc device is
   unbound and passed to the DPDK.
 
+* IBM Power8 is not supported by this release of DPDK. IBM Power9 is supported.
+
 
 Tested Platforms
 ----------------
index a2d75ed..6f04743 100644 (file)
@@ -9,7 +9,6 @@ This section explains how to compile the DPDK sample applications.
 To compile all the sample applications
 --------------------------------------
 
-
 Set the path to the DPDK source code if it is not set:
 
     .. code-block:: console
@@ -93,3 +92,17 @@ Build the application:
 
         export RTE_TARGET=build
         make
+
+To cross compile the sample application(s)
+------------------------------------------
+
+To cross compile the sample application(s), append ``CROSS=$(CROSS_COMPILER_PREFIX)`` to the ``make`` command.
+For example, to cross compile for AArch64:
+
+    .. code-block:: console
+
+        export RTE_TARGET=build
+        export RTE_SDK=/path/to/rte_sdk
+        make -C examples CROSS=aarch64-linux-gnu-
+
+    or:
+
+    .. code-block:: console
+
+        make CROSS=aarch64-linux-gnu-
index 0d6fe2b..840d557 100644 (file)
@@ -367,7 +367,7 @@ The forwarding loop can be interrupted and the application closed using
 The generate_ipv4_flow function
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-The generate_ipv4_rule function is responsible for creating the flow rule.
+The generate_ipv4_flow function is responsible for creating the flow rule.
 This function is located in the ``flow_blocks.c`` file.
 
 .. code-block:: c
index 18912cd..e1b56d7 100644 (file)
@@ -23,8 +23,8 @@ There are two key differences from the L2 Forwarding sample application:
 
 *   The second difference is that the application differentiates between IP and non-IP traffic by means of offload flags.
 
-The Longest Prefix Match (LPM for IPv4, LPM6 for IPv6) table is used to store/lookup an outgoing port number, associated with that IPv4 address. Any unmatched packets are forwarded to the originating port.Compiling the Application
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+The Longest Prefix Match (LPM for IPv4, LPM6 for IPv6) table is used to store/lookup an outgoing port number,
+associated with that IPv4 address. Any unmatched packets are forwarded to the originating port.
 
 
 Compiling the Application
index ce1474e..f6efa7f 100644 (file)
@@ -319,7 +319,6 @@ It is the mcast_out_pkt() function that performs the packet duplication (either
         hdr->pkt.in_port = pkt->pkt.in_port;
         hdr->pkt.vlan_macip = pkt->pkt.vlan_macip;
         hdr->pkt.hash = pkt->pkt.hash;
-        hdr->ol_flags = pkt->ol_flags;
         rte_mbuf_sanity_check(hdr, RTE_MBUF_PKT, 1);
 
         return hdr;
index 1ad4f14..5be9f24 100644 (file)
@@ -657,6 +657,31 @@ To build just the ``guest_vm_power_manager`` application using ``make``:
 
 The resulting binary will be ${RTE_SDK}/build/examples/guest_cli
 
+.. Note::
+  This sample application conditionally links in the Jansson JSON
+  library, so if you are using a multilib or cross compile environment you
+  may need to set the ``PKG_CONFIG_LIBDIR`` environment variable to point to
+  the relevant pkgconfig folder so that the correct library is linked in.
+
+  For example, if you are building for a 32-bit target, you could find the
+  correct directory using the following ``find`` command:
+
+  .. code-block:: console
+
+      # find /usr -type d -name pkgconfig
+      /usr/lib/i386-linux-gnu/pkgconfig
+      /usr/lib/x86_64-linux-gnu/pkgconfig
+
+  Then use:
+
+  .. code-block:: console
+
+      export PKG_CONFIG_LIBDIR=/usr/lib/i386-linux-gnu/pkgconfig
+
+  You can then use the ``make`` command as normal, which should find the
+  32-bit version of the library, if it is installed. If not, the application
+  will be built without the JSON interface functionality.
+
 To build just the ``vm_power_manager`` application using ``meson/ninja``:
 
 .. code-block:: console
 To build just the ``vm_power_manager`` application using ``meson/ninja``:
 
 .. code-block:: console
index 305cc06..ffd26f1 100644 (file)
@@ -19,6 +19,7 @@
 #include <rte_vfio.h>
 #include <rte_eal.h>
 #include <rte_bus.h>
 #include <rte_vfio.h>
 #include <rte_eal.h>
 #include <rte_bus.h>
+#include <rte_spinlock.h>
 
 #include "eal_filesystem.h"
 
 
 }
 
 #ifdef HAVE_VFIO_DEV_REQ_INTERFACE
 }
 
 #ifdef HAVE_VFIO_DEV_REQ_INTERFACE
+/*
+ * Spinlock for device hot-unplug failure handling.
+ * If it tries to access bus or device, such as handle sigbus on bus
+ * or handle memory failure for device, just need to use this lock.
+ * It could protect the bus and the device to avoid race condition.
+ */
+static rte_spinlock_t failure_handle_lock = RTE_SPINLOCK_INITIALIZER;
+
 static void
 pci_vfio_req_handler(void *param)
 {
 static void
 pci_vfio_req_handler(void *param)
 {
@@ -289,11 +298,12 @@ pci_vfio_req_handler(void *param)
        int ret;
        struct rte_device *device = (struct rte_device *)param;
 
        int ret;
        struct rte_device *device = (struct rte_device *)param;
 
+       rte_spinlock_lock(&failure_handle_lock);
        bus = rte_bus_find_by_device(device);
        if (bus == NULL) {
                RTE_LOG(ERR, EAL, "Cannot find bus for device (%s)\n",
                        device->name);
        bus = rte_bus_find_by_device(device);
        if (bus == NULL) {
                RTE_LOG(ERR, EAL, "Cannot find bus for device (%s)\n",
                        device->name);
-               return;
+               goto handle_end;
        }
 
        /*
        }
 
        /*
@@ -306,6 +316,8 @@ pci_vfio_req_handler(void *param)
                RTE_LOG(ERR, EAL,
                        "Can not handle hot-unplug for device (%s)\n",
                        device->name);
                RTE_LOG(ERR, EAL,
                        "Can not handle hot-unplug for device (%s)\n",
                        device->name);
+handle_end:
+       rte_spinlock_unlock(&failure_handle_lock);
 }
 
 /* enable notifier (only enable req now) */
 }
 
 /* enable notifier (only enable req now) */
index c5a9f34..5933ea7 100644 (file)
@@ -3449,32 +3449,27 @@ find_kasumif9_direction_and_length(uint8_t *src,
                                   uint8_t *addr_direction)
 {
        uint8_t found = 0;
                                   uint8_t *addr_direction)
 {
        uint8_t found = 0;
+       uint32_t pos;
+       uint8_t last_byte;
        while (!found && counter_num_bytes > 0) {
                counter_num_bytes--;
                if (src[counter_num_bytes] == 0x00)
                        continue;
        while (!found && counter_num_bytes > 0) {
                counter_num_bytes--;
                if (src[counter_num_bytes] == 0x00)
                        continue;
-               if (src[counter_num_bytes] == 0x80) {
-                       *addr_direction  =  src[counter_num_bytes - 1] & 0x1;
-                       *addr_length_in_bits = counter_num_bytes * 8  - 1;
-                       found = 1;
-               } else {
-                       int i = 0;
-                       uint8_t last_byte = src[counter_num_bytes];
-                       for (i = 0; i < 8 && found == 0; i++) {
-                               if (last_byte & (1 << i)) {
-                                       *addr_direction = (last_byte >> (i+1))
-                                                         & 0x1;
-                                       if (i != 6)
-                                               *addr_length_in_bits =
-                                                       counter_num_bytes * 8
-                                                       + (8 - (i + 2));
-                                       else
-                                               *addr_length_in_bits =
-                                                       counter_num_bytes * 8;
-                                       found = 1;
-                                       }
-                               }
+               pos = rte_bsf32(src[counter_num_bytes]);
+               if (pos == 7) {
+                       if (likely(counter_num_bytes > 0)) {
+                               last_byte = src[counter_num_bytes - 1];
+                               *addr_direction  =  last_byte & 0x1;
+                               *addr_length_in_bits = counter_num_bytes * 8
+                                                       - 1;
                        }
+               } else {
+                       last_byte = src[counter_num_bytes];
+                       *addr_direction = (last_byte >> (pos + 1)) & 0x1;
+                       *addr_length_in_bits = counter_num_bytes * 8
+                                               + (8 - (pos + 2));
+               }
+               found = 1;
        }
 }
 
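The rewritten loop uses ``rte_bsf32()`` (index of the least-significant
set bit) to locate the KASUMI F9 padding: the input ends with the
direction bit, a single '1' bit and zero padding, so the lowest set bit
of the last non-zero byte marks the padding boundary. A standalone sketch
of the bit arithmetic (illustrative values, not driver code):

.. code-block:: c

    #include <stdint.h>
    #include <stdio.h>
    #include <rte_common.h>

    int main(void)
    {
        uint8_t last_nonzero = 0x18; /* 00011000b: pad '1' at bit 3 */
        uint32_t pos = rte_bsf32(last_nonzero);          /* pos = 3 */
        uint8_t dir = (last_nonzero >> (pos + 1)) & 0x1; /* dir = 1 */

        printf("padding bit at %u, direction bit %u\n",
               (unsigned)pos, (unsigned)dir);
        return 0;
    }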
index 898d2f3..ffc835d 100644 (file)
                PKT_TX_TCP_SEG)
 
 #define AVF_TX_OFFLOAD_MASK (  \
+               PKT_TX_OUTER_IPV6 |              \
+               PKT_TX_OUTER_IPV4 |              \
+               PKT_TX_IPV6 |                    \
+               PKT_TX_IPV4 |                    \
                PKT_TX_VLAN_PKT |                \
                PKT_TX_IP_CKSUM |                \
                PKT_TX_L4_MASK |                 \
index 2661620..44deaf1 100644 (file)
@@ -3423,9 +3423,16 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
                                     "Failed to parse agg selection mode for bonded device %s",
                                     name);
                }
                                     "Failed to parse agg selection mode for bonded device %s",
                                     name);
                }
-               if (internals->mode == BONDING_MODE_8023AD)
-                       rte_eth_bond_8023ad_agg_selection_set(port_id,
-                                                             agg_mode);
+               if (internals->mode == BONDING_MODE_8023AD) {
+                       int ret = rte_eth_bond_8023ad_agg_selection_set(port_id,
+                                       agg_mode);
+                       if (ret < 0) {
+                               RTE_BOND_LOG(ERR,
+                                       "Invalid args for agg selection set for bonded device %s",
+                                       name);
+                               return -1;
+                       }
+               }
        }
 
        /* Parse/add slave ports to bonded device */
index 05a4fbe..3690afe 100644 (file)
@@ -1096,6 +1096,7 @@ static int ena_create_io_queue(struct ena_ring *ring)
                { ENA_ADMIN_PLACEMENT_POLICY_HOST,
                  0, 0, 0, 0, 0 };
        uint16_t ena_qid;
+       unsigned int i;
        int rc;
 
        adapter = ring->adapter;
@@ -1106,10 +1107,14 @@ static int ena_create_io_queue(struct ena_ring *ring)
                ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
                ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
                ctx.queue_size = adapter->tx_ring_size;
+               for (i = 0; i < ring->ring_size; i++)
+                       ring->empty_tx_reqs[i] = i;
        } else {
                ena_qid = ENA_IO_RXQ_IDX(ring->id);
                ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
                ctx.queue_size = adapter->rx_ring_size;
+               for (i = 0; i < ring->ring_size; i++)
+                       ring->empty_rx_reqs[i] = i;
        }
        ctx.qid = ena_qid;
        ctx.msix_vector = -1; /* interrupts not used */
@@ -1152,6 +1157,8 @@ static void ena_free_io_queues_all(struct ena_adapter *adapter)
        for (i = 0; i < nb_txq; ++i) {
                ena_qid = ENA_IO_TXQ_IDX(i);
                ena_com_destroy_io_queue(ena_dev, ena_qid);
+
+               ena_tx_queue_release_bufs(&adapter->tx_ring[i]);
        }
 
        for (i = 0; i < nb_rxq; ++i) {
index 790ecc3..7030eb1 100644 (file)
@@ -11609,6 +11609,32 @@ i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
        return 0;
 }
 
+/**
+ * Check whether a register offset is valid.
+ * The following register ranges are valid on X722 only:
+ * 0x2b800--0x2bb00
+ * 0x38700--0x38a00
+ * 0x3d800--0x3db00
+ * 0x208e00--0x209000
+ * 0x20be00--0x20c000
+ * 0x263c00--0x264000
+ * 0x265c00--0x266000
+ */
+static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
+{
+       if ((type != I40E_MAC_X722) &&
+           ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
+            (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
+            (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
+            (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
+            (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
+            (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
+            (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
+               return 0;
+       else
+               return 1;
+}
+
 static int i40e_get_regs(struct rte_eth_dev *dev,
                         struct rte_dev_reg_info *regs)
 {
@@ -11650,8 +11676,11 @@ static int i40e_get_regs(struct rte_eth_dev *dev,
                                reg_offset = arr_idx * reg_info->stride1 +
                                        arr_idx2 * reg_info->stride2;
                                reg_offset += reg_info->base_addr;
-                               ptr_data[reg_offset >> 2] =
-                                       I40E_READ_REG(hw, reg_offset);
+                               if (!i40e_valid_regs(hw->mac.type, reg_offset))
+                                       ptr_data[reg_offset >> 2] = 0;
+                               else
+                                       ptr_data[reg_offset >> 2] =
+                                               I40E_READ_REG(hw, reg_offset);
                        }
        }
 
@@ -12554,8 +12583,6 @@ i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
                return -EINVAL;
        if (!in->key && in->key_len)
                return -EINVAL;
-       if (in->key)
-               out->conf.key = memcpy(out->key, in->key, in->key_len);
        out->conf = (struct rte_flow_action_rss){
                .func = in->func,
                .level = in->level,
@@ -12565,6 +12592,8 @@ i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
                .queue = memcpy(out->queue, in->queue,
                                sizeof(*in->queue) * in->queue_num),
        };
+       if (in->key)
+               out->conf.key = memcpy(out->key, in->key, in->key_len);
        return 0;
 }
 
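The move matters because assigning a compound literal rewrites every
member of ``out->conf``, clobbering any field set beforehand. A
standalone illustration (editorial, not driver code):

.. code-block:: c

    #include <stdio.h>

    struct conf { const void *key; int level; };

    int main(void)
    {
        static const char key[] = "rss-key";
        struct conf c;

        c.key = key;                     /* set first ...              */
        c = (struct conf){ .level = 1 }; /* ... clobbered: key is NULL */
        printf("key after literal: %s\n", c.key ? "set" : "NULL");

        c = (struct conf){ .level = 1 }; /* assign the literal first   */
        c.key = key;                     /* ... then set: key survives */
        printf("key set afterwards: %s\n", c.key ? "set" : "NULL");
        return 0;
    }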
index 21f973e..fb50719 100644 (file)
@@ -5259,7 +5259,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                ixgbe_flap_tx_laser(hw);
 
                /* Wait for the controller to acquire link.  Per IEEE 802.3ap,
-                * Section 73.10.2, we may have to wait up to 500ms if KR is
+                * Section 73.10.2, we may have to wait up to 1000ms if KR is
                 * attempted.  82599 uses the same timing for 10g SFI.
                 */
                for (i = 0; i < 10; i++) {
index bee8586..a009448 100644 (file)
@@ -354,8 +354,9 @@ mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx4_mr *mr)
        DEBUG("port %u inserting MR(%p) to global cache",
              dev->data->port_id, (void *)mr);
        for (n = 0; n < mr->ms_bmp_n; ) {
        DEBUG("port %u inserting MR(%p) to global cache",
              dev->data->port_id, (void *)mr);
        for (n = 0; n < mr->ms_bmp_n; ) {
-               struct mlx4_mr_cache entry = { 0, };
+               struct mlx4_mr_cache entry;
 
+               memset(&entry, 0, sizeof(entry));
                /* Find a contiguous chunk and advance the index. */
                n = mr_find_next_chunk(mr, &entry, n);
                if (!entry.end)
@@ -398,8 +399,9 @@ mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
                if (mr->ms_n == 0)
                        continue;
                for (n = 0; n < mr->ms_bmp_n; ) {
-                       struct mlx4_mr_cache ret = { 0, };
+                       struct mlx4_mr_cache ret;
 
+                       memset(&ret, 0, sizeof(ret));
                        n = mr_find_next_chunk(mr, &ret, n);
                        if (addr >= ret.start && addr < ret.end) {
                                /* Found. */
@@ -571,7 +573,7 @@ mlx4_mr_create(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
         * Find out a contiguous virtual address chunk in use, to which the
         * given address belongs, in order to register maximum range. In the
         * best case where mempools are not dynamically recreated and
-        * '--socket-mem' is speicified as an EAL option, it is very likely to
+        * '--socket-mem' is specified as an EAL option, it is very likely to
         * have only one MR(LKey) per a socket and per a hugepage-size even
         * though the system memory is highly fragmented.
         */
@@ -688,8 +690,9 @@ alloc_resources:
         */
        for (n = 0; n < ms_n; ++n) {
                uintptr_t start;
-               struct mlx4_mr_cache ret = { 0, };
+               struct mlx4_mr_cache ret;
 
+               memset(&ret, 0, sizeof(ret));
                start = data_re.start + n * msl->page_sz;
                /* Exclude memsegs already registered by other MRs. */
                if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
@@ -1039,7 +1042,7 @@ mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr)
  * @return
  *   Searched LKey on success, UINT32_MAX on no match.
  */
-uint32_t
+static uint32_t
 mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr)
 {
        struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
@@ -1050,6 +1053,32 @@ mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr)
        return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
 }
 
+/**
+ * Bottom-half of LKey search on Tx. If it can't be searched in the memseg
+ * list, register the mempool of the mbuf as externally allocated memory.
+ *
+ * @param txq
+ *   Pointer to Tx queue structure.
+ * @param mb
+ *   Pointer to mbuf.
+ *
+ * @return
+ *   Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx4_tx_mb2mr_bh(struct txq *txq, struct rte_mbuf *mb)
+{
+       uintptr_t addr = (uintptr_t)mb->buf_addr;
+       uint32_t lkey;
+
+       lkey = mlx4_tx_addr2mr_bh(txq, addr);
+       if (lkey == UINT32_MAX && rte_errno == ENXIO) {
+               /* Mempool may have externally allocated memory. */
+               return mlx4_tx_update_ext_mp(txq, addr, mlx4_mb2mp(mb));
+       }
+       return lkey;
+}
+
 /**
  * Flush all of the local cache entries.
  *
@@ -1277,8 +1306,9 @@ mlx4_mr_dump_dev(struct rte_eth_dev *dev)
                if (mr->ms_n == 0)
                        continue;
                for (n = 0; n < mr->ms_bmp_n; ) {
-                       struct mlx4_mr_cache ret = { 0, };
+                       struct mlx4_mr_cache ret;
 
+                       memset(&ret, 0, sizeof(ret));
                        n = mr_find_next_chunk(mr, &ret, n);
                        if (!ret.end)
                                break;
index 1be060c..d7ec4e0 100644 (file)
@@ -162,7 +162,7 @@ void mlx4_tx_queue_release(void *dpdk_txq);
 
 void mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl);
 uint32_t mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr);
-uint32_t mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr);
+uint32_t mlx4_tx_mb2mr_bh(struct txq *txq, struct rte_mbuf *mb);
 uint32_t mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr,
                               struct rte_mempool *mp);
 
@@ -176,7 +176,7 @@ uint32_t mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr,
  * @return
  *   Memory pool where data is located for given mbuf.
  */
-static struct rte_mempool *
+static inline struct rte_mempool *
 mlx4_mb2mp(struct rte_mbuf *buf)
 {
        if (unlikely(RTE_MBUF_INDIRECT(buf)))
@@ -225,9 +225,10 @@ mlx4_rx_addr2mr(struct rxq *rxq, uintptr_t addr)
  *   Searched LKey on success, UINT32_MAX on no match.
  */
 static __rte_always_inline uint32_t
-mlx4_tx_addr2mr(struct txq *txq, uintptr_t addr)
+mlx4_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
 {
        struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+       uintptr_t addr = (uintptr_t)mb->buf_addr;
        uint32_t lkey;
 
        /* Check generation bit to see if there's any change on existing MRs. */
@@ -238,23 +239,8 @@ mlx4_tx_addr2mr(struct txq *txq, uintptr_t addr)
                                    MLX4_MR_CACHE_N, addr);
        if (likely(lkey != UINT32_MAX))
                return lkey;
-       /* Take slower bottom-half (binary search) on miss. */
-       return mlx4_tx_addr2mr_bh(txq, addr);
-}
-
-static __rte_always_inline uint32_t
-mlx4_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
-{
-       uintptr_t addr = (uintptr_t)mb->buf_addr;
-       uint32_t lkey = mlx4_tx_addr2mr(txq, addr);
-
-       if (likely(lkey != UINT32_MAX))
-               return lkey;
-       if (rte_errno == ENXIO) {
-               /* Mempool may have externally allocated memory. */
-               lkey = mlx4_tx_update_ext_mp(txq, addr, mlx4_mb2mp(mb));
-       }
-       return lkey;
+       /* Take slower bottom-half on miss. */
+       return mlx4_tx_mb2mr_bh(txq, mb);
 }
 
 #endif /* MLX4_RXTX_H_ */
index 5ad3a11..97dc3e1 100644 (file)
@@ -294,7 +294,7 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = {
        },
        {
                .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
-               .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
+               .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_MPLS,
@@ -1593,12 +1593,14 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
 /**
  * Validate MPLS item.
  *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
  * @param[in] item
  *   Item specification.
  * @param[in] item_flags
  *   Bit-fields that holds the items detected until now.
- * @param[in] target_protocol
- *   The next protocol in the previous item.
+ * @param[in] prev_layer
+ *   The protocol layer indicated in previous item.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -1606,16 +1608,27 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_validate_item_mpls(const struct rte_flow_item *item __rte_unused,
+mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
+                            const struct rte_flow_item *item __rte_unused,
                             uint64_t item_flags __rte_unused,
-                            uint8_t target_protocol __rte_unused,
+                            uint64_t prev_layer __rte_unused,
                             struct rte_flow_error *error)
 {
 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
        const struct rte_flow_item_mpls *mask = item->mask;
+       struct priv *priv = dev->data->dev_private;
        int ret;
 
-       if (target_protocol != 0xff && target_protocol != IPPROTO_MPLS)
+       if (!priv->config.mpls_en)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                         "MPLS not supported or"
+                                         " disabled in firmware"
+                                         " configuration.");
+       /* MPLS over IP, UDP or GRE is allowed. */
+       if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
+                           MLX5_FLOW_LAYER_OUTER_L4_UDP |
+                           MLX5_FLOW_LAYER_GRE)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "protocol filtering not compatible"
@@ -2127,14 +2140,14 @@ static void
 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
                  struct rte_flow *flow)
 {
-       flow_drv_destroy(dev, flow);
-       TAILQ_REMOVE(list, flow, next);
        /*
         * Update RX queue flags only if port is started, otherwise it is
         * already clean.
         */
        if (dev->data->dev_started)
                flow_rxq_flags_trim(dev, flow);
+       flow_drv_destroy(dev, flow);
+       TAILQ_REMOVE(list, flow, next);
        rte_free(flow->fdir);
        rte_free(flow);
 }
index 51ab47f..4a7c052 100644 (file)
 #define IPPROTO_MPLS 137
 #endif
 
+/* UDP port number for MPLS */
+#define MLX5_UDP_PORT_MPLS 6635
+
 /* UDP port numbers for VxLAN. */
 #define MLX5_UDP_PORT_VXLAN 4789
 #define MLX5_UDP_PORT_VXLAN_GPE 4790
@@ -219,6 +222,7 @@ struct mlx5_flow_dv {
 struct mlx5_flow_tcf {
        struct nlmsghdr *nlh;
        struct tcmsg *tcm;
+       uint32_t *ptc_flags; /**< tc rule applied flags. */
        union { /**< Tunnel encap/decap descriptor. */
                struct flow_tcf_tunnel_hdr *tunnel;
                struct flow_tcf_vxlan_decap *vxlan_decap;
@@ -381,9 +385,10 @@ int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
 int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
                                 uint64_t item_flags,
                                 struct rte_flow_error *error);
-int mlx5_flow_validate_item_mpls(const struct rte_flow_item *item,
+int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
+                                const struct rte_flow_item *item,
                                 uint64_t item_flags,
-                                uint8_t target_protocol,
+                                uint64_t prev_layer,
                                 struct rte_flow_error *error);
 int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
                                uint64_t item_flags,
index a2edd16..1f31874 100644 (file)
@@ -775,6 +775,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
        int ret;
        uint64_t action_flags = 0;
        uint64_t item_flags = 0;
+       uint64_t last_item = 0;
        int tunnel = 0;
        uint8_t next_protocol = 0xff;
        int actions_n = 0;
@@ -794,24 +795,24 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                                          error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
-                                              MLX5_FLOW_LAYER_OUTER_L2;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+                                            MLX5_FLOW_LAYER_OUTER_L2;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        ret = mlx5_flow_validate_item_vlan(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
-                                              MLX5_FLOW_LAYER_OUTER_VLAN;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+                                            MLX5_FLOW_LAYER_OUTER_VLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ret = mlx5_flow_validate_item_ipv4(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
-                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+                                            MLX5_FLOW_LAYER_OUTER_L3_IPV4;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                             items->mask)->hdr.next_proto_id) {
@@ -831,8 +832,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                                           error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
-                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+                                            MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                             items->mask)->hdr.proto) {
@@ -855,8 +856,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                                 error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
-                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+                                            MLX5_FLOW_LAYER_OUTER_L4_TCP;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        ret = mlx5_flow_validate_item_udp(items, item_flags,
@@ -864,8 +865,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                                          error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
-                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+                                            MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
@@ -873,14 +874,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                                          next_protocol, error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= MLX5_FLOW_LAYER_GRE;
+                       last_item = MLX5_FLOW_LAYER_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        ret = mlx5_flow_validate_item_vxlan(items, item_flags,
                                                            error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= MLX5_FLOW_LAYER_VXLAN;
+                       last_item = MLX5_FLOW_LAYER_VXLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        ret = mlx5_flow_validate_item_vxlan_gpe(items,
@@ -888,20 +889,29 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                                                error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+                       last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+                       break;
+               case RTE_FLOW_ITEM_TYPE_MPLS:
+                       ret = mlx5_flow_validate_item_mpls(dev, items,
+                                                          item_flags,
+                                                          last_item, error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = MLX5_FLOW_LAYER_MPLS;
                        break;
                case RTE_FLOW_ITEM_TYPE_META:
                        ret = flow_dv_validate_item_meta(dev, items, attr,
                                                         error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= MLX5_FLOW_ITEM_METADATA;
+                       last_item = MLX5_FLOW_ITEM_METADATA;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "item not supported");
                }
+               item_flags |= last_item;
        }
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
@@ -1608,6 +1618,96 @@ flow_dv_translate_item_vxlan(void *matcher, void *key,
                vni_v[i] = vni_m[i] & vxlan_v->vni[i];
 }
 
+/**
+ * Add MPLS item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] prev_layer
+ *   The protocol layer indicated in previous item.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_mpls(void *matcher, void *key,
+                           const struct rte_flow_item *item,
+                           uint64_t prev_layer,
+                           int inner)
+{
+       const uint32_t *in_mpls_m = item->mask;
+       const uint32_t *in_mpls_v = item->spec;
+       uint32_t *out_mpls_m = 0;
+       uint32_t *out_mpls_v = 0;
+       void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+       void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+       void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
+                                    misc_parameters_2);
+       void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
+       void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+       void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+
+       switch (prev_layer) {
+       case MLX5_FLOW_LAYER_OUTER_L4_UDP:
+               MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+                        MLX5_UDP_PORT_MPLS);
+               break;
+       case MLX5_FLOW_LAYER_GRE:
+               MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
+               MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+                        ETHER_TYPE_MPLS);
+               break;
+       default:
+               MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+                        IPPROTO_MPLS);
+               break;
+       }
+       if (!in_mpls_v)
+               return;
+       if (!in_mpls_m)
+               in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
+       switch (prev_layer) {
+       case MLX5_FLOW_LAYER_OUTER_L4_UDP:
+               out_mpls_m =
+                       (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
+                                                outer_first_mpls_over_udp);
+               out_mpls_v =
+                       (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
+                                                outer_first_mpls_over_udp);
+               break;
+       case MLX5_FLOW_LAYER_GRE:
+               out_mpls_m =
+                       (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
+                                                outer_first_mpls_over_gre);
+               out_mpls_v =
+                       (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
+                                                outer_first_mpls_over_gre);
+               break;
+       default:
+               /* Inner MPLS not over GRE is not supported. */
+               if (!inner) {
+                       out_mpls_m =
+                               (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
+                                                        misc2_m,
+                                                        outer_first_mpls);
+                       out_mpls_v =
+                               (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
+                                                        misc2_v,
+                                                        outer_first_mpls);
+               }
+               break;
+       }
+       if (out_mpls_m && out_mpls_v) {
+               *out_mpls_m = *in_mpls_m;
+               *out_mpls_v = *in_mpls_v & *in_mpls_m;
+       }
+}
+
 /**
  * Add META item to matcher
  *
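For reference when reading the 32-bit copies in the new translate
function: an MPLS header word packs label(20) | TC(3) | S(1) | TTL(8),
per RFC 3032. A standalone illustration of the packing:

.. code-block:: c

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t label = 16, tc = 0, s = 1, ttl = 64;
        uint32_t mpls = (label << 12) | (tc << 9) | (s << 8) | ttl;

        printf("MPLS header word: 0x%08" PRIx32 "\n", mpls); /* 0x00010140 */
        return 0;
    }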
@@ -1786,6 +1886,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
        struct priv *priv = dev->data->dev_private;
        struct rte_flow *flow = dev_flow->flow;
        uint64_t item_flags = 0;
+       uint64_t last_item = 0;
        uint64_t action_flags = 0;
        uint64_t priority = attr->priority;
        struct mlx5_flow_dv_matcher matcher = {
@@ -1940,17 +2041,17 @@ flow_dv_translate(struct rte_eth_dev *dev,
                        flow_dv_translate_item_eth(match_mask, match_value,
                                                   items, tunnel);
                        matcher.priority = MLX5_PRIORITY_MAP_L2;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
-                                              MLX5_FLOW_LAYER_OUTER_L2;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+                                            MLX5_FLOW_LAYER_OUTER_L2;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        flow_dv_translate_item_vlan(match_mask, match_value,
                                                    items, tunnel);
                        matcher.priority = MLX5_PRIORITY_MAP_L2;
-                       item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
-                                               MLX5_FLOW_LAYER_INNER_VLAN) :
-                                              (MLX5_FLOW_LAYER_OUTER_L2 |
-                                               MLX5_FLOW_LAYER_OUTER_VLAN);
+                       last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
+                                             MLX5_FLOW_LAYER_INNER_VLAN) :
+                                            (MLX5_FLOW_LAYER_OUTER_L2 |
+                                             MLX5_FLOW_LAYER_OUTER_VLAN);
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        flow_dv_translate_item_ipv4(match_mask, match_value,
@@ -1961,8 +2062,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
                                        (dev_flow, tunnel,
                                         MLX5_IPV4_LAYER_TYPES,
                                         MLX5_IPV4_IBV_RX_HASH);
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
-                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+                                            MLX5_FLOW_LAYER_OUTER_L3_IPV4;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        flow_dv_translate_item_ipv6(match_mask, match_value,
@@ -1973,8 +2074,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
                                        (dev_flow, tunnel,
                                         MLX5_IPV6_LAYER_TYPES,
                                         MLX5_IPV6_IBV_RX_HASH);
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
-                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+                                            MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        flow_dv_translate_item_tcp(match_mask, match_value,
@@ -1985,8 +2086,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
                                        (dev_flow, tunnel, ETH_RSS_TCP,
                                         IBV_RX_HASH_SRC_PORT_TCP |
                                         IBV_RX_HASH_DST_PORT_TCP);
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
-                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+                                            MLX5_FLOW_LAYER_OUTER_L4_TCP;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        flow_dv_translate_item_udp(match_mask, match_value,
@@ -1997,37 +2098,43 @@ flow_dv_translate(struct rte_eth_dev *dev,
                                        (dev_flow, tunnel, ETH_RSS_UDP,
                                         IBV_RX_HASH_SRC_PORT_UDP |
                                         IBV_RX_HASH_DST_PORT_UDP);
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
-                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+                                            MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                        flow_dv_translate_item_gre(match_mask, match_value,
                                                   items, tunnel);
-                       item_flags |= MLX5_FLOW_LAYER_GRE;
+                       last_item = MLX5_FLOW_LAYER_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        flow_dv_translate_item_nvgre(match_mask, match_value,
                                                     items, tunnel);
-                       item_flags |= MLX5_FLOW_LAYER_GRE;
+                       last_item = MLX5_FLOW_LAYER_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        flow_dv_translate_item_vxlan(match_mask, match_value,
                                                     items, tunnel);
-                       item_flags |= MLX5_FLOW_LAYER_VXLAN;
+                       last_item = MLX5_FLOW_LAYER_VXLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        flow_dv_translate_item_vxlan(match_mask, match_value,
                                                     items, tunnel);
-                       item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+                       last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+                       break;
+               case RTE_FLOW_ITEM_TYPE_MPLS:
+                       flow_dv_translate_item_mpls(match_mask, match_value,
+                                                   items, last_item, tunnel);
+                       last_item = MLX5_FLOW_LAYER_MPLS;
                        break;
                case RTE_FLOW_ITEM_TYPE_META:
                        flow_dv_translate_item_meta(match_mask, match_value,
                                                    items);
-                       item_flags |= MLX5_FLOW_ITEM_METADATA;
+                       last_item = MLX5_FLOW_ITEM_METADATA;
                        break;
                default:
                        break;
                }
+               item_flags |= last_item;
        }
        assert(!flow_dv_check_valid_spec(matcher.mask.buf,
                                         dev_flow->dv.value.buf));
@@ -2275,8 +2382,10 @@ flow_dv_query(struct rte_eth_dev *dev __rte_unused,
              void *data __rte_unused,
              struct rte_flow_error *error __rte_unused)
 {
-       rte_errno = ENOTSUP;
-       return -rte_errno;
+       return rte_flow_error_set(error, ENOTSUP,
+                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                 NULL,
+                                 "flow query with DV is not supported");
 }
 
 
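flow_dv_query() now fails through the standard rte_flow error path rather
than only setting rte_errno. rte_flow_error_set() fills the caller-supplied
error object (when non-NULL), stores the positive errno code in rte_errno,
and returns its negative, so an unsupported callback collapses to a single
statement. A small usage sketch (the function name is illustrative):

#include <errno.h>
#include <rte_flow.h>

/* Sketch: an unsupported rte_flow callback reduced to one call. This
 * sets rte_errno = ENOTSUP and returns -ENOTSUP in one step. */
static int
example_query_unsupported(struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "operation not supported");
}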
index 97d2a54..2f6b7d6 100644 (file)
@@ -160,6 +160,9 @@ struct tc_tunnel_key {
 #ifndef TCA_CLS_FLAGS_SKIP_SW
 #define TCA_CLS_FLAGS_SKIP_SW (1 << 1)
 #endif
+#ifndef TCA_CLS_FLAGS_IN_HW
+#define TCA_CLS_FLAGS_IN_HW (1 << 2)
+#endif
 #ifndef HAVE_TCA_CHAIN
 #define TCA_CHAIN 11
 #endif
@@ -3699,6 +3702,8 @@ override_na_vlan_priority:
        assert(na_flower);
        assert(na_flower_act);
        mnl_attr_nest_end(nlh, na_flower_act);
+       dev_flow->tcf.ptc_flags = mnl_attr_get_payload
+                                       (mnl_nlmsg_get_payload_tail(nlh));
        mnl_attr_put_u32(nlh, TCA_FLOWER_FLAGS, decap.vxlan ?
                                                0 : TCA_CLS_FLAGS_SKIP_SW);
        mnl_attr_nest_end(nlh, na_flower);
@@ -3717,10 +3722,6 @@ override_na_vlan_priority:
  * @param nlh
  *   Message to send. This function always raises the NLM_F_ACK flag before
  *   sending.
- * @param[in] msglen
- *   Message length. Message buffer may contain multiple commands and
- *   nlmsg_len field not always corresponds to actual message length.
- *   If 0 specified the nlmsg_len field in header is used as message length.
  * @param[in] cb
  *   Callback handler for received message.
  * @param[in] arg
@@ -3732,52 +3733,64 @@ override_na_vlan_priority:
 static int
 flow_tcf_nl_ack(struct mlx5_flow_tcf_context *tcf,
                struct nlmsghdr *nlh,
-               uint32_t msglen,
                mnl_cb_t cb, void *arg)
 {
        unsigned int portid = mnl_socket_get_portid(tcf->nl);
        uint32_t seq = tcf->seq++;
-       int err, ret;
+       int ret, err = 0;
 
        assert(tcf->nl);
        assert(tcf->buf);
 
        assert(tcf->nl);
        assert(tcf->buf);
-       if (!seq)
+       if (!seq) {
                /* seq 0 is reserved for kernel event-driven notifications. */
                seq = tcf->seq++;
+       }
        nlh->nlmsg_seq = seq;
        nlh->nlmsg_seq = seq;
-       if (!msglen) {
-               msglen = nlh->nlmsg_len;
-               nlh->nlmsg_flags |= NLM_F_ACK;
+       nlh->nlmsg_flags |= NLM_F_ACK;
+       ret = mnl_socket_sendto(tcf->nl, nlh, nlh->nlmsg_len);
+       if (ret <= 0) {
+               /* Message send error occurred. */
+               rte_errno = errno;
+               return -rte_errno;
        }
-       ret = mnl_socket_sendto(tcf->nl, nlh, msglen);
-       err = (ret <= 0) ? errno : 0;
        nlh = (struct nlmsghdr *)(tcf->buf);
        /*
         * The following loop postpones non-fatal errors until multipart
         * messages are complete.
         */
-       if (ret > 0)
-               while (true) {
-                       ret = mnl_socket_recvfrom(tcf->nl, tcf->buf,
-                                                 tcf->buf_size);
+       while (true) {
+               ret = mnl_socket_recvfrom(tcf->nl, tcf->buf, tcf->buf_size);
+               if (ret < 0) {
+                       err = errno;
+                       /*
+                        * In case of overflow, keep receiving till the
+                        * end of the multipart message. We may lose part
+                        * of the reply messages but mark and return an
+                        * error.
+                        */
+                       if (err != ENOSPC ||
+                           !(nlh->nlmsg_flags & NLM_F_MULTI) ||
+                           nlh->nlmsg_type == NLMSG_DONE)
+                               break;
+               } else {
+                       ret = mnl_cb_run(nlh, ret, seq, portid, cb, arg);
+                       if (!ret) {
+                               /*
+                                * libmnl returns 0 if DONE or
+                                * success ACK message found.
+                                */
+                               break;
+                       }
                        if (ret < 0) {
+                               /*
+                                * An ACK message with an error was
+                                * found, or some other error occurred.
+                                */
                                err = errno;
-                               if (err != ENOSPC)
-                                       break;
-                       }
-                       if (!err) {
-                               ret = mnl_cb_run(nlh, ret, seq, portid,
-                                                cb, arg);
-                               if (ret < 0) {
-                                       err = errno;
-                                       break;
-                               }
-                       }
-                       /* Will receive till end of multipart message */
-                       if (!(nlh->nlmsg_flags & NLM_F_MULTI) ||
-                             nlh->nlmsg_type == NLMSG_DONE)
                                break;
+                       }
+                       /* We should continue receiving. */
                }
+       }
        if (!err)
                return 0;
        rte_errno = err;
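The reworked flow_tcf_nl_ack() always raises NLM_F_ACK, sends the message
with its own nlmsg_len (the msglen parameter is gone), and then drains
replies until the final ACK or DONE, postponing non-fatal receive errors
such as ENOSPC until the multipart stream completes. A hedged sketch of the
same send-then-drain shape with bare libmnl calls, error postponement
omitted for brevity:

#include <errno.h>
#include <libmnl/libmnl.h>

/* Sketch: send one Netlink request and drain replies until the ACK.
 * The nl/buf/buf_size/seq arguments mirror the tcf context fields. */
static int
nl_request_ack(struct mnl_socket *nl, struct nlmsghdr *nlh,
	       char *buf, size_t buf_size, unsigned int seq)
{
	unsigned int portid = mnl_socket_get_portid(nl);
	ssize_t len;
	int ret;

	nlh->nlmsg_seq = seq;
	nlh->nlmsg_flags |= NLM_F_ACK;
	if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) <= 0)
		return -errno;
	do {
		len = mnl_socket_recvfrom(nl, buf, buf_size);
		if (len < 0)
			return -errno;
		/* mnl_cb_run() returns >0 to keep reading a multipart
		 * reply, 0 on ACK/DONE, <0 on an error ACK. */
		ret = mnl_cb_run(buf, len, seq, portid, NULL, NULL);
	} while (ret > 0);
	return ret < 0 ? -errno : 0;
}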
@@ -3886,7 +3899,7 @@ flow_tcf_send_nlcmd(struct mlx5_flow_tcf_context *tcf,
                        nlh = (struct nlmsghdr *)&bc->msg[msg];
                        assert((bc->size - msg) >= nlh->nlmsg_len);
                        msg += nlh->nlmsg_len;
-                       rc = flow_tcf_nl_ack(tcf, nlh, 0, NULL, NULL);
+                       rc = flow_tcf_nl_ack(tcf, nlh, NULL, NULL);
                        if (rc) {
                                DRV_LOG(WARNING,
                                        "netlink: cleanup error %d", rc);
@@ -4019,7 +4032,7 @@ flow_tcf_encap_local_cleanup(struct mlx5_flow_tcf_context *tcf,
        ifa->ifa_family = AF_UNSPEC;
        ifa->ifa_index = ifindex;
        ifa->ifa_scope = RT_SCOPE_LINK;
        ifa->ifa_family = AF_UNSPEC;
        ifa->ifa_index = ifindex;
        ifa->ifa_scope = RT_SCOPE_LINK;
-       ret = flow_tcf_nl_ack(tcf, nlh, 0, flow_tcf_collect_local_cb, &ctx);
+       ret = flow_tcf_nl_ack(tcf, nlh, flow_tcf_collect_local_cb, &ctx);
        if (ret)
                DRV_LOG(WARNING, "netlink: query device list error %d", ret);
        ret = flow_tcf_send_nlcmd(tcf, &ctx);
@@ -4140,7 +4153,7 @@ flow_tcf_encap_neigh_cleanup(struct mlx5_flow_tcf_context *tcf,
        ndm->ndm_family = AF_UNSPEC;
        ndm->ndm_ifindex = ifindex;
        ndm->ndm_state = NUD_PERMANENT;
        ndm->ndm_family = AF_UNSPEC;
        ndm->ndm_ifindex = ifindex;
        ndm->ndm_state = NUD_PERMANENT;
-       ret = flow_tcf_nl_ack(tcf, nlh, 0, flow_tcf_collect_neigh_cb, &ctx);
+       ret = flow_tcf_nl_ack(tcf, nlh, flow_tcf_collect_neigh_cb, &ctx);
        if (ret)
                DRV_LOG(WARNING, "netlink: query device list error %d", ret);
        ret = flow_tcf_send_nlcmd(tcf, &ctx);
@@ -4269,7 +4282,7 @@ flow_tcf_encap_iface_cleanup(struct mlx5_flow_tcf_context *tcf,
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
        ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
        ifm->ifi_family = AF_UNSPEC;
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
        ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
        ifm->ifi_family = AF_UNSPEC;
-       ret = flow_tcf_nl_ack(tcf, nlh, 0, flow_tcf_collect_vxlan_cb, &ctx);
+       ret = flow_tcf_nl_ack(tcf, nlh, flow_tcf_collect_vxlan_cb, &ctx);
        if (ret)
                DRV_LOG(WARNING, "netlink: query device list error %d", ret);
        ret = flow_tcf_send_nlcmd(tcf, &ctx);
@@ -4341,7 +4354,7 @@ flow_tcf_rule_local(struct mlx5_flow_tcf_context *tcf,
                                          sizeof(encap->ipv6.dst),
                                          &encap->ipv6.dst);
        }
                                          sizeof(encap->ipv6.dst),
                                          &encap->ipv6.dst);
        }
-       if (!flow_tcf_nl_ack(tcf, nlh, 0, NULL, NULL))
+       if (!flow_tcf_nl_ack(tcf, nlh, NULL, NULL))
                return 0;
        return rte_flow_error_set(error, rte_errno,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -4404,7 +4417,7 @@ flow_tcf_rule_neigh(struct mlx5_flow_tcf_context *tcf,
        if (encap->mask & FLOW_TCF_ENCAP_ETH_DST)
                mnl_attr_put(nlh, NDA_LLADDR, sizeof(encap->eth.dst),
                                                    &encap->eth.dst);
        if (encap->mask & FLOW_TCF_ENCAP_ETH_DST)
                mnl_attr_put(nlh, NDA_LLADDR, sizeof(encap->eth.dst),
                                                    &encap->eth.dst);
-       if (!flow_tcf_nl_ack(tcf, nlh, 0, NULL, NULL))
+       if (!flow_tcf_nl_ack(tcf, nlh, NULL, NULL))
                return 0;
        return rte_flow_error_set(error, rte_errno,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -4679,7 +4692,7 @@ flow_tcf_vtep_delete(struct mlx5_flow_tcf_context *tcf,
                ifm->ifi_family = AF_UNSPEC;
                ifm->ifi_index = vtep->ifindex;
                assert(sizeof(buf) >= nlh->nlmsg_len);
                ifm->ifi_family = AF_UNSPEC;
                ifm->ifi_index = vtep->ifindex;
                assert(sizeof(buf) >= nlh->nlmsg_len);
-               ret = flow_tcf_nl_ack(tcf, nlh, 0, NULL, NULL);
+               ret = flow_tcf_nl_ack(tcf, nlh, NULL, NULL);
                if (ret)
                        DRV_LOG(WARNING, "netlink: error deleting vxlan"
                                         " encap/decap ifindex %u",
@@ -4769,7 +4782,7 @@ flow_tcf_vtep_create(struct mlx5_flow_tcf_context *tcf,
        mnl_attr_nest_end(nlh, na_vxlan);
        mnl_attr_nest_end(nlh, na_info);
        assert(sizeof(buf) >= nlh->nlmsg_len);
-       ret = flow_tcf_nl_ack(tcf, nlh, 0, NULL, NULL);
+       ret = flow_tcf_nl_ack(tcf, nlh, NULL, NULL);
        if (ret) {
                DRV_LOG(WARNING,
                        "netlink: VTEP %s create failure (%d)",
@@ -4811,7 +4824,7 @@ flow_tcf_vtep_create(struct mlx5_flow_tcf_context *tcf,
        ifm->ifi_index = vtep->ifindex;
        ifm->ifi_flags = IFF_UP;
        ifm->ifi_change = IFF_UP;
        ifm->ifi_index = vtep->ifindex;
        ifm->ifi_flags = IFF_UP;
        ifm->ifi_change = IFF_UP;
-       ret = flow_tcf_nl_ack(tcf, nlh, 0, NULL, NULL);
+       ret = flow_tcf_nl_ack(tcf, nlh, NULL, NULL);
        if (ret) {
                rte_flow_error_set(error, -errno,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -5069,6 +5082,172 @@ flow_tcf_vtep_release(struct mlx5_flow_tcf_context *tcf,
        pthread_mutex_unlock(&vtep_list_mutex);
 }
 
+struct tcf_nlcb_query {
+       uint32_t handle;
+       uint32_t tc_flags;
+       uint32_t flags_valid:1;
+};
+
+/**
+ * Collect queried rule attributes. This is a callback routine called by
+ * libmnl mnl_cb_run() in a loop for every message in a received packet.
+ * The current implementation collects the flower flags only.
+ *
+ * @param[in] nlh
+ *   Pointer to reply header.
+ * @param[in, out] arg
+ *   Context pointer for this callback.
+ *
+ * @return
+ *   A positive, nonzero value on success (required by libmnl
+ *   to continue message processing).
+ */
+static int
+flow_tcf_collect_query_cb(const struct nlmsghdr *nlh, void *arg)
+{
+       struct tcf_nlcb_query *query = arg;
+       struct tcmsg *tcm = mnl_nlmsg_get_payload(nlh);
+       struct nlattr *na, *na_opt;
+       bool flower = false;
+
+       if (nlh->nlmsg_type != RTM_NEWTFILTER ||
+           tcm->tcm_handle != query->handle)
+               return 1;
+       mnl_attr_for_each(na, nlh, sizeof(*tcm)) {
+               switch (mnl_attr_get_type(na)) {
+               case TCA_KIND:
+                       if (strcmp(mnl_attr_get_payload(na), "flower")) {
+                               /* Not flower filter, drop entire message. */
+                               return 1;
+                       }
+                       flower = true;
+                       break;
+               case TCA_OPTIONS:
+                       if (!flower) {
+                               /* Not flower options, drop entire message. */
+                               return 1;
+                       }
+                       /* Check nested flower options. */
+                       mnl_attr_for_each_nested(na_opt, na) {
+                               switch (mnl_attr_get_type(na_opt)) {
+                               case TCA_FLOWER_FLAGS:
+                                       query->flags_valid = 1;
+                                       query->tc_flags =
+                                               mnl_attr_get_u32(na_opt);
+                                       break;
+                               }
+                       }
+                       break;
+               }
+       }
+       return 1;
+}
+
+/**
+ * Query a TC flower rule flags via netlink.
+ *
+ * @param[in] tcf
+ *   Context object initialized by mlx5_flow_tcf_context_create().
+ * @param[in] dev_flow
+ *   Pointer to the flow.
+ * @param[out] pflags
+ *   Pointer to the data retrieved by the query.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise.
+ */
+static int
+flow_tcf_query_flags(struct mlx5_flow_tcf_context *tcf,
+                    struct mlx5_flow *dev_flow,
+                    uint32_t *pflags)
+{
+       struct nlmsghdr *nlh;
+       struct tcmsg *tcm;
+       struct tcf_nlcb_query query = {
+               .handle = dev_flow->tcf.tcm->tcm_handle,
+       };
+
+       nlh = mnl_nlmsg_put_header(tcf->buf);
+       nlh->nlmsg_type = RTM_GETTFILTER;
+       nlh->nlmsg_flags = NLM_F_REQUEST;
+       tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
+       memcpy(tcm, dev_flow->tcf.tcm, sizeof(*tcm));
+       /*
+        * Ignore Netlink error for filter query operations.
+        * The reply length is sent by the kernel as errno.
+        * Just check we got the flags option.
+        */
+       flow_tcf_nl_ack(tcf, nlh, flow_tcf_collect_query_cb, &query);
+       if (!query.flags_valid) {
+               *pflags = 0;
+               return -ENOENT;
+       }
+       *pflags = query.tc_flags;
+       return 0;
+}
+
+/**
+ * Query and check the in_hw set for specified rule.
+ *
+ * @param[in] tcf
+ *   Context object initialized by mlx5_flow_tcf_context_create().
+ * @param[in] dev_flow
+ *   Pointer to the flow to check.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise.
+ */
+static int
+flow_tcf_check_inhw(struct mlx5_flow_tcf_context *tcf,
+                   struct mlx5_flow *dev_flow)
+{
+       uint32_t flags;
+       int ret;
+
+       ret = flow_tcf_query_flags(tcf, dev_flow, &flags);
+       if (ret)
+               return ret;
+       return (flags & TCA_CLS_FLAGS_IN_HW) ? 0 : -ENOENT;
+}
+
+/**
+ * Remove flow from E-Switch by sending Netlink message.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in, out] flow
+ *   Pointer to the sub flow.
+ */
+static void
+flow_tcf_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+       struct priv *priv = dev->data->dev_private;
+       struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
+       struct mlx5_flow *dev_flow;
+       struct nlmsghdr *nlh;
+
+       if (!flow)
+               return;
+       dev_flow = LIST_FIRST(&flow->dev_flows);
+       if (!dev_flow)
+               return;
+       /* E-Switch flow can't be expanded. */
+       assert(!LIST_NEXT(dev_flow, next));
+       if (dev_flow->tcf.applied) {
+               nlh = dev_flow->tcf.nlh;
+               nlh->nlmsg_type = RTM_DELTFILTER;
+               nlh->nlmsg_flags = NLM_F_REQUEST;
+               flow_tcf_nl_ack(ctx, nlh, NULL, NULL);
+               if (dev_flow->tcf.tunnel) {
+                       assert(dev_flow->tcf.tunnel->vtep);
+                       flow_tcf_vtep_release(ctx,
+                               dev_flow->tcf.tunnel->vtep,
+                               dev_flow);
+                       dev_flow->tcf.tunnel->vtep = NULL;
+               }
+               dev_flow->tcf.applied = 0;
+       }
+}
 
 /**
  * Apply flow to E-Switch by sending Netlink message.
@@ -5120,8 +5299,22 @@ flow_tcf_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                *dev_flow->tcf.tunnel->ifindex_ptr =
                        dev_flow->tcf.tunnel->vtep->ifindex;
        }
                *dev_flow->tcf.tunnel->ifindex_ptr =
                        dev_flow->tcf.tunnel->vtep->ifindex;
        }
-       if (!flow_tcf_nl_ack(ctx, nlh, 0, NULL, NULL)) {
+       if (!flow_tcf_nl_ack(ctx, nlh, NULL, NULL)) {
                dev_flow->tcf.applied = 1;
                dev_flow->tcf.applied = 1;
+               if (*dev_flow->tcf.ptc_flags & TCA_CLS_FLAGS_SKIP_SW)
+                       return 0;
+               /*
+                * Rule was applied without skip_sw flag set.
+                * We should check whether the rule was actually
+                * accepted by hardware (have a look at the in_hw flag).
+                */
+               if (flow_tcf_check_inhw(ctx, dev_flow)) {
+                       flow_tcf_remove(dev, flow);
+                       return rte_flow_error_set
+                               (error, ENOENT,
+                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                "netlink: rule has no in_hw flag set");
+               }
                return 0;
        }
        if (dev_flow->tcf.tunnel) {
@@ -5136,45 +5329,6 @@ flow_tcf_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
                                  "netlink: failed to create TC flow rule");
 }
 
-/**
- * Remove flow from E-Switch by sending Netlink message.
- *
- * @param[in] dev
- *   Pointer to Ethernet device.
- * @param[in, out] flow
- *   Pointer to the sub flow.
- */
-static void
-flow_tcf_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
-{
-       struct priv *priv = dev->data->dev_private;
-       struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
-       struct mlx5_flow *dev_flow;
-       struct nlmsghdr *nlh;
-
-       if (!flow)
-               return;
-       dev_flow = LIST_FIRST(&flow->dev_flows);
-       if (!dev_flow)
-               return;
-       /* E-Switch flow can't be expanded. */
-       assert(!LIST_NEXT(dev_flow, next));
-       if (dev_flow->tcf.applied) {
-               nlh = dev_flow->tcf.nlh;
-               nlh->nlmsg_type = RTM_DELTFILTER;
-               nlh->nlmsg_flags = NLM_F_REQUEST;
-               flow_tcf_nl_ack(ctx, nlh, 0, NULL, NULL);
-               if (dev_flow->tcf.tunnel) {
-                       assert(dev_flow->tcf.tunnel->vtep);
-                       flow_tcf_vtep_release(ctx,
-                               dev_flow->tcf.tunnel->vtep,
-                               dev_flow);
-                       dev_flow->tcf.tunnel->vtep = NULL;
-               }
-               dev_flow->tcf.applied = 0;
-       }
-}
-
 /**
  * Remove flow from E-Switch and release resources of the device flow.
  *
@@ -5494,7 +5648,7 @@ flow_tcf_nl_filter_parse_and_get(struct nlmsghdr *cnlh,
  *   Message received from Netlink.
  * @param[out] data
  *   Pointer to data area to be filled by the parsing routine.
- *   assumed to be a pinter to struct flow_tcf_stats_basic.
+ *   assumed to be a pointer to struct flow_tcf_stats_basic.
  *
  * @return
  *   MNL_CB_OK value.
@@ -5542,7 +5696,7 @@ flow_tcf_query_count(struct rte_eth_dev *dev,
                          void *data,
                          struct rte_flow_error *error)
 {
-       struct flow_tcf_stats_basic sb_data = { 0 };
+       struct flow_tcf_stats_basic sb_data;
        struct rte_flow_query_count *qc = data;
        struct priv *priv = dev->data->dev_private;
        struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
@@ -5553,6 +5707,7 @@ flow_tcf_query_count(struct rte_eth_dev *dev,
        ssize_t ret;
        assert(qc);
 
+       memset(&sb_data, 0, sizeof(sb_data));
        dev_flow = LIST_FIRST(&flow->dev_flows);
        /* E-Switch flow can't be expanded. */
        assert(!LIST_NEXT(dev_flow, next));
@@ -5714,7 +5869,7 @@ mlx5_flow_tcf_init(struct mlx5_flow_tcf_context *ctx,
        tcm->tcm_parent = TC_H_INGRESS;
        assert(sizeof(buf) >= nlh->nlmsg_len);
        /* Ignore errors when qdisc is already absent. */
        tcm->tcm_parent = TC_H_INGRESS;
        assert(sizeof(buf) >= nlh->nlmsg_len);
        /* Ignore errors when qdisc is already absent. */
-       if (flow_tcf_nl_ack(ctx, nlh, 0, NULL, NULL) &&
+       if (flow_tcf_nl_ack(ctx, nlh, NULL, NULL) &&
            rte_errno != EINVAL && rte_errno != ENOENT)
                return rte_flow_error_set(error, rte_errno,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -5731,7 +5886,7 @@ mlx5_flow_tcf_init(struct mlx5_flow_tcf_context *ctx,
        tcm->tcm_parent = TC_H_INGRESS;
        mnl_attr_put_strz_check(nlh, sizeof(buf), TCA_KIND, "ingress");
        assert(sizeof(buf) >= nlh->nlmsg_len);
        tcm->tcm_parent = TC_H_INGRESS;
        mnl_attr_put_strz_check(nlh, sizeof(buf), TCA_KIND, "ingress");
        assert(sizeof(buf) >= nlh->nlmsg_len);
-       if (flow_tcf_nl_ack(ctx, nlh, 0, NULL, NULL))
+       if (flow_tcf_nl_ack(ctx, nlh, NULL, NULL))
                return rte_flow_error_set(error, rte_errno,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "netlink: failed to create ingress"
index d6d95db..81ec59d 100644 (file)
@@ -68,9 +68,10 @@ flow_verbs_counter_create(struct rte_eth_dev *dev,
 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
        struct priv *priv = dev->data->dev_private;
        struct ibv_counters_init_attr init = {0};
-       struct ibv_counter_attach_attr attach = {0};
+       struct ibv_counter_attach_attr attach;
        int ret;
 
+       memset(&attach, 0, sizeof(attach));
        counter->cs = mlx5_glue->create_counters(priv->ctx, &init);
        if (!counter->cs) {
                rte_errno = ENOTSUP;
        counter->cs = mlx5_glue->create_counters(priv->ctx, &init);
        if (!counter->cs) {
                rte_errno = ENOTSUP;
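This hunk is one of several in this import that replace a brace
zero-initializer with an explicit memset(). Presumably this keeps older
toolchains quiet about `= {0}` on structs whose first member is itself an
aggregate (-Wmissing-braces style warnings) while also guaranteeing that
padding bytes are cleared; a contrived illustration of the two spellings:

#include <string.h>

struct inner { int a, b; };
struct outer { struct inner in; int c; };

void
zero_both_ways(void)
{
	struct outer v1 = {0}; /* may warn on some older compilers */
	struct outer v2;       /* memset form: no warning, padding zeroed */

	memset(&v2, 0, sizeof(v2));
	(void)v1;
	(void)v2;
}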
@@ -1017,6 +1018,7 @@ flow_verbs_validate(struct rte_eth_dev *dev,
        int ret;
        uint64_t action_flags = 0;
        uint64_t item_flags = 0;
+       uint64_t last_item = 0;
        uint8_t next_protocol = 0xff;
 
        if (items == NULL)
@@ -1036,26 +1038,26 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                                                          error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
-                                              MLX5_FLOW_LAYER_OUTER_L2;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+                                            MLX5_FLOW_LAYER_OUTER_L2;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        ret = mlx5_flow_validate_item_vlan(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
-                                               MLX5_FLOW_LAYER_INNER_VLAN) :
-                                              (MLX5_FLOW_LAYER_OUTER_L2 |
-                                               MLX5_FLOW_LAYER_OUTER_VLAN);
+                       last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
+                                             MLX5_FLOW_LAYER_INNER_VLAN) :
+                                            (MLX5_FLOW_LAYER_OUTER_L2 |
+                                             MLX5_FLOW_LAYER_OUTER_VLAN);
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ret = mlx5_flow_validate_item_ipv4(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
-                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+                                            MLX5_FLOW_LAYER_OUTER_L3_IPV4;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                             items->mask)->hdr.next_proto_id) {
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                             items->mask)->hdr.next_proto_id) {
@@ -1075,8 +1077,8 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                                                           error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
-                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+                                            MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                             items->mask)->hdr.proto) {
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                             items->mask)->hdr.proto) {
@@ -1097,8 +1099,8 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                                                          error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
-                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+                                            MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        ret = mlx5_flow_validate_item_tcp
@@ -1108,15 +1110,15 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                                                 error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
-                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+                                            MLX5_FLOW_LAYER_OUTER_L4_TCP;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        ret = mlx5_flow_validate_item_vxlan(items, item_flags,
                                                            error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= MLX5_FLOW_LAYER_VXLAN;
+                       last_item = MLX5_FLOW_LAYER_VXLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        ret = mlx5_flow_validate_item_vxlan_gpe(items,
@@ -1124,28 +1126,29 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                                                                dev, error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+                       last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                        ret = mlx5_flow_validate_item_gre(items, item_flags,
                                                          next_protocol, error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= MLX5_FLOW_LAYER_GRE;
+                       last_item = MLX5_FLOW_LAYER_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_MPLS:
-                       ret = mlx5_flow_validate_item_mpls(items, item_flags,
-                                                          next_protocol,
-                                                          error);
+                       ret = mlx5_flow_validate_item_mpls(dev, items,
+                                                          item_flags,
+                                                          last_item, error);
                        if (ret < 0)
                                return ret;
-                       item_flags |= MLX5_FLOW_LAYER_MPLS;
+                       last_item = MLX5_FLOW_LAYER_MPLS;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "item not supported");
                }
+               item_flags |= last_item;
        }
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
index f4b15d3..442b2d2 100644 (file)
@@ -342,8 +342,9 @@ mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx5_mr *mr)
        DRV_LOG(DEBUG, "port %u inserting MR(%p) to global cache",
                dev->data->port_id, (void *)mr);
        for (n = 0; n < mr->ms_bmp_n; ) {
-               struct mlx5_mr_cache entry = { 0, };
+               struct mlx5_mr_cache entry;
 
+               memset(&entry, 0, sizeof(entry));
                /* Find a contiguous chunk and advance the index. */
                n = mr_find_next_chunk(mr, &entry, n);
                if (!entry.end)
@@ -386,8 +387,9 @@ mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
                if (mr->ms_n == 0)
                        continue;
                for (n = 0; n < mr->ms_bmp_n; ) {
                if (mr->ms_n == 0)
                        continue;
                for (n = 0; n < mr->ms_bmp_n; ) {
-                       struct mlx5_mr_cache ret = { 0, };
+                       struct mlx5_mr_cache ret;
 
+                       memset(&ret, 0, sizeof(ret));
                        n = mr_find_next_chunk(mr, &ret, n);
                        if (addr >= ret.start && addr < ret.end) {
                                /* Found. */
@@ -570,7 +572,7 @@ mlx5_mr_create(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
         * Find out a contiguous virtual address chunk in use, to which the
         * given address belongs, in order to register maximum range. In the
         * best case where mempools are not dynamically recreated and
-        * '--socket-mem' is speicified as an EAL option, it is very likely to
+        * '--socket-mem' is specified as an EAL option, it is very likely to
         * have only one MR(LKey) per a socket and per a hugepage-size even
         * though the system memory is highly fragmented.
         */
@@ -688,8 +690,9 @@ alloc_resources:
         */
        for (n = 0; n < ms_n; ++n) {
                uintptr_t start;
-               struct mlx5_mr_cache ret = { 0, };
+               struct mlx5_mr_cache ret;
 
+               memset(&ret, 0, sizeof(ret));
                start = data_re.start + n * msl->page_sz;
                /* Exclude memsegs already registered by other MRs. */
                if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
                start = data_re.start + n * msl->page_sz;
                /* Exclude memsegs already registered by other MRs. */
                if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
@@ -1042,7 +1045,7 @@ mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
  * @return
  *   Searched LKey on success, UINT32_MAX on no match.
  */
-uint32_t
+static uint32_t
 mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
 {
        struct mlx5_txq_ctrl *txq_ctrl =
@@ -1056,6 +1059,32 @@ mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
        return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
 }
 
+/**
+ * Bottom-half of LKey search on Tx. If the address cannot be found in the
+ * memseg list, register the mempool of the mbuf as externally allocated
+ * memory.
+ *
+ * @param txq
+ *   Pointer to Tx queue structure.
+ * @param mb
+ *   Pointer to mbuf.
+ *
+ * @return
+ *   Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
+{
+       uintptr_t addr = (uintptr_t)mb->buf_addr;
+       uint32_t lkey;
+
+       lkey = mlx5_tx_addr2mr_bh(txq, addr);
+       if (lkey == UINT32_MAX && rte_errno == ENXIO) {
+               /* Mempool may have externally allocated memory. */
+               return mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
+       }
+       return lkey;
+}
+
 /**
  * Flush all of the local cache entries.
  *
index eef4850..183da0e 100644 (file)
@@ -1468,6 +1468,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        tmpl->rxq.mp = mp;
        tmpl->rxq.stats.idx = idx;
        tmpl->rxq.elts_n = log2above(desc);
        tmpl->rxq.mp = mp;
        tmpl->rxq.stats.idx = idx;
        tmpl->rxq.elts_n = log2above(desc);
+       tmpl->rxq.rq_repl_thresh =
+               MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
        tmpl->rxq.elts =
                (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
 #ifndef RTE_ARCH_64
        tmpl->rxq.elts =
                (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
 #ifndef RTE_ARCH_64
@@ -1782,7 +1784,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
        struct mlx5_ind_table_ibv *ind_tbl;
        struct ibv_qp *qp;
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-       struct mlx5dv_qp_init_attr qp_init_attr = {0};
+       struct mlx5dv_qp_init_attr qp_init_attr;
 #endif
        int err;
 
@@ -1795,6 +1797,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
                return NULL;
        }
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+       memset(&qp_init_attr, 0, sizeof(qp_init_attr));
        if (tunnel) {
                qp_init_attr.comp_mask =
                                MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
index 1b6200f..f47d327 100644 (file)
@@ -101,6 +101,7 @@ struct mlx5_rxq_data {
        uint16_t consumed_strd; /* Number of consumed strides in WQE. */
        uint32_t rq_pi;
        uint32_t cq_ci;
+       uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
        struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
        uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
        volatile void *wqes;
@@ -363,7 +364,7 @@ uint16_t mlx5_rx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
 
 void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
 uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
-uint32_t mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr);
+uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb);
 uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
                               struct rte_mempool *mp);
 
@@ -379,17 +380,16 @@ uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
  *   Address of the lock to use for that UAR access.
  */
 static __rte_always_inline void
-__mlx5_uar_write64_relaxed(uint64_t val, volatile void *addr,
+__mlx5_uar_write64_relaxed(uint64_t val, void *addr,
                           rte_spinlock_t *lock __rte_unused)
 {
 #ifdef RTE_ARCH_64
-       rte_write64_relaxed(val, addr);
+       *(uint64_t *)addr = val;
 #else /* !RTE_ARCH_64 */
        rte_spinlock_lock(lock);
-       rte_write32_relaxed(val, addr);
+       *(uint32_t *)addr = val;
        rte_io_wmb();
-       rte_write32_relaxed(val >> 32,
-                           (volatile void *)((volatile char *)addr + 4));
+       *((uint32_t *)addr + 1) = val >> 32;
        rte_spinlock_unlock(lock);
 #endif
 }
@@ -407,7 +407,7 @@ __mlx5_uar_write64_relaxed(uint64_t val, volatile void *addr,
  *   Address of the lock to use for that UAR access.
  */
 static __rte_always_inline void
-__mlx5_uar_write64(uint64_t val, volatile void *addr, rte_spinlock_t *lock)
+__mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock)
 {
        rte_io_wmb();
        __mlx5_uar_write64_relaxed(val, addr, lock);
@@ -619,7 +619,7 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
  * @return
  *   Memory pool where data is located for given mbuf.
  */
-static struct rte_mempool *
+static inline struct rte_mempool *
 mlx5_mb2mp(struct rte_mbuf *buf)
 {
        if (unlikely(RTE_MBUF_INDIRECT(buf)))
@@ -668,9 +668,10 @@ mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
  *   Searched LKey on success, UINT32_MAX on no match.
  */
 static __rte_always_inline uint32_t
-mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr)
+mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
 {
        struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+       uintptr_t addr = (uintptr_t)mb->buf_addr;
        uint32_t lkey;
 
        /* Check generation bit to see if there's any change on existing MRs. */
@@ -681,23 +682,8 @@ mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr)
                                    MLX5_MR_CACHE_N, addr);
        if (likely(lkey != UINT32_MAX))
                return lkey;
-       /* Take slower bottom-half (binary search) on miss. */
-       return mlx5_tx_addr2mr_bh(txq, addr);
-}
-
-static __rte_always_inline uint32_t
-mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
-{
-       uintptr_t addr = (uintptr_t)mb->buf_addr;
-       uint32_t lkey = mlx5_tx_addr2mr(txq, addr);
-
-       if (likely(lkey != UINT32_MAX))
-               return lkey;
-       if (rte_errno == ENXIO) {
-               /* Mempool may have externally allocated memory. */
-               lkey = mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
-       }
-       return lkey;
+       /* Take slower bottom-half on miss. */
+       return mlx5_tx_mb2mr_bh(txq, mb);
 }
 
 /**
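The Tx-side MR lookup is now split explicitly around the mbuf: the inlined
mlx5_tx_mb2mr() probes the small per-queue cache, and only a miss goes to
the out-of-line mlx5_tx_mb2mr_bh(), which can also register the mbuf's
mempool when the address belongs to externally allocated memory (ENXIO).
A hedged sketch of this two-level shape; cache_probe() and slow_lookup()
are hypothetical stand-ins:

#include <stdint.h>

#define NO_KEY UINT32_MAX

struct tiny_cache { uintptr_t addr[8]; uint32_t key[8]; };

uint32_t slow_lookup(struct tiny_cache *c, uintptr_t addr); /* out of line */

/* Fast path, meant to stay inlined in the burst loop. */
static inline uint32_t
cache_probe(const struct tiny_cache *c, uintptr_t addr)
{
	for (int i = 0; i < 8; i++)
		if (c->addr[i] == addr)
			return c->key[i];
	return NO_KEY;
}

static inline uint32_t
lookup(struct tiny_cache *c, uintptr_t addr)
{
	uint32_t key = cache_probe(c, addr);

	return key != NO_KEY ? key : slow_lookup(c, addr);
}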
index 0b729f1..883fe1b 100644 (file)
@@ -732,7 +732,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
         *   N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
         */
        repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
-       if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
+       if (repl_n >= rxq->rq_repl_thresh)
                mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
        /* See if there're unreturned mbufs from compressed CQE. */
        rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
index e0f95f9..14117c4 100644 (file)
@@ -716,7 +716,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
         *   N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
         */
        repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
-       if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
+       if (repl_n >= rxq->rq_repl_thresh)
                mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
        /* See if there're unreturned mbufs from compressed CQE. */
        rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
index a14d1e4..fccb9af 100644 (file)
@@ -354,10 +354,11 @@ int
 mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
        struct priv *priv = dev->data->dev_private;
-       struct rte_eth_stats tmp = {0};
+       struct rte_eth_stats tmp;
        unsigned int i;
        unsigned int idx;
 
+       memset(&tmp, 0, sizeof(tmp));
        /* Add software counters. */
        for (i = 0; (i != priv->rxqs_n); ++i) {
                struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
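The stats hunk swaps the brace-zero initializer for an explicit memset. The diff itself states no rationale; a plausible one (an assumption on this editor's part) is toolchain portability, since "= {0}" zero-initializes every member in standard C yet still draws missing-initializer warnings from some compilers when the struct contains arrays:

    #include <string.h>

    struct counters {
            unsigned long ipackets;
            unsigned long opackets;
            unsigned long q_ipackets[16];
    };

    void
    collect(struct counters *out)
    {
            struct counters tmp;

            /* zero the whole object, padding included, with no
             * initializer-list warnings */
            memset(&tmp, 0, sizeof(tmp));
            /* ... accumulate per-queue counters into tmp ... */
            *out = tmp;
    }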
index 9cd53be..9adcd97 100644 (file)
@@ -422,7 +422,7 @@ mrvl_meter_profile_update(struct rte_eth_dev *dev, uint32_t mtr_id,
        struct mrvl_priv *priv = dev->data->dev_private;
        struct mrvl_mtr_profile *profile;
        struct mrvl_mtr *mtr;
-       int ret, enabled;
+       int ret, enabled = 0;
 
        if (!priv->ppio)
                return -rte_mtr_error_set(error, EPERM,
index a3063be..046e129 100644 (file)
@@ -1238,15 +1238,8 @@ octeontx_probe(struct rte_vdev_device *dev)
                res = -EINVAL;
                goto parse_error;
        }
-       if (pnum > qnum) {
-               /*
-                * We don't poll on event ports
-                * that do not have any queues assigned.
-                */
-               pnum = qnum;
-               PMD_INIT_LOG(INFO,
-                       "reducing number of active event ports to %d", pnum);
-       }
+
+       /* Enable all queues available */
        for (i = 0; i < qnum; i++) {
                res = rte_event_queue_setup(evdev, i, NULL);
                if (res < 0) {
@@ -1256,6 +1249,7 @@ octeontx_probe(struct rte_vdev_device *dev)
                }
        }
 
+       /* Enable all ports available */
        for (i = 0; i < pnum; i++) {
                res = rte_event_port_setup(evdev, i, NULL);
                if (res < 0) {
@@ -1264,6 +1258,14 @@ octeontx_probe(struct rte_vdev_device *dev)
                                                i, res);
                        goto parse_error;
                }
+       }
+
+       /*
+        * Do 1:1 links for ports & queues. All queues would be mapped to
+        * one port. If there are more ports than queues, then some ports
+        * won't be linked to any queue.
+        */
+       for (i = 0; i < qnum; i++) {
                /* Link one queue to one event port */
                qlist = i;
                res = rte_event_port_link(evdev, i, &qlist, NULL, 1);
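The reworked octeontx_probe() now configures every queue, then every port, and finally links queue i to port i, leaving surplus ports unlinked instead of silently shrinking pnum as the deleted block did. Condensed from the hunks above into one helper (error logging trimmed, names otherwise as in the diff):

    #include <rte_eventdev.h>

    static int
    setup_and_link_1_to_1(uint8_t evdev, uint8_t qnum, uint8_t pnum)
    {
            uint8_t i, qlist;

            for (i = 0; i < qnum; i++)
                    if (rte_event_queue_setup(evdev, i, NULL) < 0)
                            return -1;
            for (i = 0; i < pnum; i++)
                    if (rte_event_port_setup(evdev, i, NULL) < 0)
                            return -1;
            /* 1:1 mapping; when pnum > qnum the extra ports stay unlinked */
            for (i = 0; i < qnum; i++) {
                    qlist = i;
                    if (rte_event_port_link(evdev, i, &qlist, NULL, 1) != 1)
                            return -1;
            }
            return 0;
    }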
index 7bbe72e..9fd9327 100644 (file)
@@ -58,8 +58,8 @@ struct queue_stat {
 };
 
 struct pcap_rx_queue {
-       pcap_t *pcap;
-       uint16_t in_port;
+       uint16_t port_id;
+       uint16_t queue_id;
        struct rte_mempool *mb_pool;
        struct queue_stat rx_stat;
        char name[PATH_MAX];
@@ -67,8 +67,8 @@ struct pcap_rx_queue {
 };
 
 struct pcap_tx_queue {
-       pcap_dumper_t *dumper;
-       pcap_t *pcap;
+       uint16_t port_id;
+       uint16_t queue_id;
        struct queue_stat tx_stat;
        char name[PATH_MAX];
        char type[ETH_PCAP_ARG_MAXLEN];
@@ -77,12 +77,19 @@ struct pcap_tx_queue {
 struct pmd_internals {
        struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
        struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
+       char devargs[ETH_PCAP_ARG_MAXLEN];
        struct ether_addr eth_addr;
        int if_index;
        int single_iface;
        int phy_mac;
 };
 
+struct pmd_process_private {
+       pcap_t *rx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
+       pcap_t *tx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
+       pcap_dumper_t *tx_dumper[RTE_PMD_PCAP_MAX_QUEUES];
+};
+
 struct pmd_devargs {
        unsigned int num_of_queue;
        struct devargs_queue {
@@ -176,14 +183,19 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
        unsigned int i;
        struct pcap_pkthdr header;
+       struct pmd_process_private *pp;
        const u_char *packet;
        struct rte_mbuf *mbuf;
        struct pcap_rx_queue *pcap_q = queue;
        uint16_t num_rx = 0;
        uint16_t buf_size;
        uint32_t rx_bytes = 0;
+       pcap_t *pcap;
+
+       pp = rte_eth_devices[pcap_q->port_id].process_private;
+       pcap = pp->rx_pcap[pcap_q->queue_id];
 
-       if (unlikely(pcap_q->pcap == NULL || nb_pkts == 0))
+       if (unlikely(pcap == NULL || nb_pkts == 0))
                return 0;
 
        /* Reads the given number of packets from the pcap file one by one
@@ -191,7 +203,7 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
         */
        for (i = 0; i < nb_pkts; i++) {
                /* Get the next PCAP packet */
-               packet = pcap_next(pcap_q->pcap, &header);
+               packet = pcap_next(pcap, &header);
                if (unlikely(packet == NULL))
                        break;
 
@@ -220,7 +232,7 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                }
 
                mbuf->pkt_len = (uint16_t)header.caplen;
-               mbuf->port = pcap_q->in_port;
+               mbuf->port = pcap_q->port_id;
                bufs[num_rx] = mbuf;
                num_rx++;
                rx_bytes += header.caplen;
@@ -250,12 +262,17 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
        unsigned int i;
        struct rte_mbuf *mbuf;
+       struct pmd_process_private *pp;
        struct pcap_tx_queue *dumper_q = queue;
        uint16_t num_tx = 0;
        uint32_t tx_bytes = 0;
        struct pcap_pkthdr header;
+       pcap_dumper_t *dumper;
+
+       pp = rte_eth_devices[dumper_q->port_id].process_private;
+       dumper = pp->tx_dumper[dumper_q->queue_id];
 
-       if (dumper_q->dumper == NULL || nb_pkts == 0)
+       if (dumper == NULL || nb_pkts == 0)
                return 0;
 
        /* writes the nb_pkts packets to the previously opened pcap file
@@ -267,12 +284,12 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                header.caplen = header.len;
 
                if (likely(mbuf->nb_segs == 1)) {
-                       pcap_dump((u_char *)dumper_q->dumper, &header,
+                       pcap_dump((u_char *)dumper, &header,
                                  rte_pktmbuf_mtod(mbuf, void*));
                } else {
                        if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
                                eth_pcap_gather_data(tx_pcap_data, mbuf);
-                               pcap_dump((u_char *)dumper_q->dumper, &header,
+                               pcap_dump((u_char *)dumper, &header,
                                          tx_pcap_data);
                        } else {
                                PMD_LOG(ERR,
@@ -295,7 +312,7 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
         * process stops and to make sure the pcap file is actually written,
         * we flush the pcap dumper within each burst.
         */
-       pcap_dump_flush(dumper_q->dumper);
+       pcap_dump_flush(dumper);
        dumper_q->tx_stat.pkts += num_tx;
        dumper_q->tx_stat.bytes += tx_bytes;
        dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;
@@ -312,24 +329,29 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        unsigned int i;
        int ret;
        struct rte_mbuf *mbuf;
+       struct pmd_process_private *pp;
        struct pcap_tx_queue *tx_queue = queue;
        uint16_t num_tx = 0;
        uint32_t tx_bytes = 0;
+       pcap_t *pcap;
 
-       if (unlikely(nb_pkts == 0 || tx_queue->pcap == NULL))
+       pp = rte_eth_devices[tx_queue->port_id].process_private;
+       pcap = pp->tx_pcap[tx_queue->queue_id];
+
+       if (unlikely(nb_pkts == 0 || pcap == NULL))
                return 0;
 
        for (i = 0; i < nb_pkts; i++) {
                mbuf = bufs[i];
 
                if (likely(mbuf->nb_segs == 1)) {
-                       ret = pcap_sendpacket(tx_queue->pcap,
+                       ret = pcap_sendpacket(pcap,
                                        rte_pktmbuf_mtod(mbuf, u_char *),
                                        mbuf->pkt_len);
                } else {
                        if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
                                eth_pcap_gather_data(tx_pcap_data, mbuf);
-                               ret = pcap_sendpacket(tx_queue->pcap,
+                               ret = pcap_sendpacket(pcap,
                                                tx_pcap_data, mbuf->pkt_len);
                        } else {
                                PMD_LOG(ERR,
@@ -430,6 +452,7 @@ eth_dev_start(struct rte_eth_dev *dev)
 {
        unsigned int i;
        struct pmd_internals *internals = dev->data->dev_private;
+       struct pmd_process_private *pp = dev->process_private;
        struct pcap_tx_queue *tx;
        struct pcap_rx_queue *rx;
 
@@ -438,10 +461,11 @@ eth_dev_start(struct rte_eth_dev *dev)
                tx = &internals->tx_queue[0];
                rx = &internals->rx_queue[0];
 
-               if (!tx->pcap && strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
-                       if (open_single_iface(tx->name, &tx->pcap) < 0)
+               if (!pp->tx_pcap[0] &&
+                       strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
+                       if (open_single_iface(tx->name, &pp->tx_pcap[0]) < 0)
                                return -1;
-                       rx->pcap = tx->pcap;
+                       pp->rx_pcap[0] = pp->tx_pcap[0];
                }
 
                goto status_up;
@@ -451,13 +475,14 @@ eth_dev_start(struct rte_eth_dev *dev)
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                tx = &internals->tx_queue[i];
 
-               if (!tx->dumper &&
+               if (!pp->tx_dumper[i] &&
                                strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
-                       if (open_single_tx_pcap(tx->name, &tx->dumper) < 0)
+                       if (open_single_tx_pcap(tx->name,
+                               &pp->tx_dumper[i]) < 0)
                                return -1;
-               } else if (!tx->pcap &&
+               } else if (!pp->tx_pcap[i] &&
                                strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
-                       if (open_single_iface(tx->name, &tx->pcap) < 0)
+                       if (open_single_iface(tx->name, &pp->tx_pcap[i]) < 0)
                                return -1;
                }
        }
@@ -466,14 +491,14 @@ eth_dev_start(struct rte_eth_dev *dev)
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rx = &internals->rx_queue[i];
 
-               if (rx->pcap != NULL)
+               if (pp->rx_pcap[i] != NULL)
                        continue;
 
                if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
-                       if (open_single_rx_pcap(rx->name, &rx->pcap) < 0)
+                       if (open_single_rx_pcap(rx->name, &pp->rx_pcap[i]) < 0)
                                return -1;
                } else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
-                       if (open_single_iface(rx->name, &rx->pcap) < 0)
+                       if (open_single_iface(rx->name, &pp->rx_pcap[i]) < 0)
                                return -1;
                }
        }
@@ -500,39 +525,32 @@ eth_dev_stop(struct rte_eth_dev *dev)
 {
        unsigned int i;
        struct pmd_internals *internals = dev->data->dev_private;
-       struct pcap_tx_queue *tx;
-       struct pcap_rx_queue *rx;
+       struct pmd_process_private *pp = dev->process_private;
 
        /* Special iface case. Single pcap is open and shared between tx/rx. */
        if (internals->single_iface) {
-               tx = &internals->tx_queue[0];
-               rx = &internals->rx_queue[0];
-               pcap_close(tx->pcap);
-               tx->pcap = NULL;
-               rx->pcap = NULL;
+               pcap_close(pp->tx_pcap[0]);
+               pp->tx_pcap[0] = NULL;
+               pp->rx_pcap[0] = NULL;
                goto status_down;
        }
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               tx = &internals->tx_queue[i];
-
-               if (tx->dumper != NULL) {
-                       pcap_dump_close(tx->dumper);
-                       tx->dumper = NULL;
+               if (pp->tx_dumper[i] != NULL) {
+                       pcap_dump_close(pp->tx_dumper[i]);
+                       pp->tx_dumper[i] = NULL;
                }
 
-               if (tx->pcap != NULL) {
-                       pcap_close(tx->pcap);
-                       tx->pcap = NULL;
+               if (pp->tx_pcap[i] != NULL) {
+                       pcap_close(pp->tx_pcap[i]);
+                       pp->tx_pcap[i] = NULL;
                }
        }
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               rx = &internals->rx_queue[i];
-
-               if (rx->pcap != NULL) {
-                       pcap_close(rx->pcap);
-                       rx->pcap = NULL;
+               if (pp->rx_pcap[i] != NULL) {
+                       pcap_close(pp->rx_pcap[i]);
+                       pp->rx_pcap[i] = NULL;
                }
        }
 
@@ -649,8 +667,9 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
        struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];
 
        pcap_q->mb_pool = mb_pool;
+       pcap_q->port_id = dev->data->port_id;
+       pcap_q->queue_id = rx_queue_id;
        dev->data->rx_queues[rx_queue_id] = pcap_q;
-       pcap_q->in_port = dev->data->port_id;
 
        return 0;
 }
@@ -663,8 +682,11 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,
                const struct rte_eth_txconf *tx_conf __rte_unused)
 {
        struct pmd_internals *internals = dev->data->dev_private;
+       struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id];
 
-       dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
+       pcap_q->port_id = dev->data->port_id;
+       pcap_q->queue_id = tx_queue_id;
+       dev->data->tx_queues[tx_queue_id] = pcap_q;
 
        return 0;
 }
@@ -896,16 +918,29 @@ pmd_init_internals(struct rte_vdev_device *vdev,
                struct rte_eth_dev **eth_dev)
 {
        struct rte_eth_dev_data *data;
+       struct pmd_process_private *pp;
        unsigned int numa_node = vdev->device.numa_node;
 
        PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d",
                numa_node);
 
+       pp = (struct pmd_process_private *)
+               rte_zmalloc(NULL, sizeof(struct pmd_process_private),
+                               RTE_CACHE_LINE_SIZE);
+
+       if (pp == NULL) {
+               PMD_LOG(ERR,
+                       "Failed to allocate memory for process private");
+               return -1;
+       }
+
        /* reserve an ethdev entry */
        *eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));
-       if (!(*eth_dev))
+       if (!(*eth_dev)) {
+               rte_free(pp);
                return -1;
-
+       }
+       (*eth_dev)->process_private = pp;
        /* now put it all together
         * - store queue data in internals,
         * - store numa_node info in eth_dev
@@ -934,6 +969,9 @@ pmd_init_internals(struct rte_vdev_device *vdev,
         */
        (*eth_dev)->dev_ops = &ops;
 
+       strlcpy((*internals)->devargs, rte_vdev_device_args(vdev),
+                       ETH_PCAP_ARG_MAXLEN);
+
        return 0;
 }
 
@@ -1027,6 +1065,7 @@ eth_from_pcaps_common(struct rte_vdev_device *vdev,
                struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
                struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
 {
+       struct pmd_process_private *pp;
        unsigned int i;
 
        /* do some parameter checking */
@@ -1039,11 +1078,12 @@ eth_from_pcaps_common(struct rte_vdev_device *vdev,
                        eth_dev) < 0)
                return -1;
 
+       pp = (*eth_dev)->process_private;
        for (i = 0; i < nb_rx_queues; i++) {
                struct pcap_rx_queue *rx = &(*internals)->rx_queue[i];
                struct devargs_queue *queue = &rx_queues->queue[i];
 
-               rx->pcap = queue->pcap;
+               pp->rx_pcap[i] = queue->pcap;
                snprintf(rx->name, sizeof(rx->name), "%s", queue->name);
                snprintf(rx->type, sizeof(rx->type), "%s", queue->type);
        }
@@ -1052,8 +1092,8 @@ eth_from_pcaps_common(struct rte_vdev_device *vdev,
                struct pcap_tx_queue *tx = &(*internals)->tx_queue[i];
                struct devargs_queue *queue = &tx_queues->queue[i];
 
-               tx->dumper = queue->dumper;
-               tx->pcap = queue->pcap;
+               pp->tx_dumper[i] = queue->dumper;
+               pp->tx_pcap[i] = queue->pcap;
                snprintf(tx->name, sizeof(tx->name), "%s", queue->name);
                snprintf(tx->type, sizeof(tx->type), "%s", queue->type);
        }
@@ -1111,7 +1151,8 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
        struct rte_kvargs *kvlist;
        struct pmd_devargs pcaps = {0};
        struct pmd_devargs dumpers = {0};
-       struct rte_eth_dev *eth_dev;
+       struct rte_eth_dev *eth_dev =  NULL;
+       struct pmd_internals *internal;
        int single_iface = 0;
        int ret;
 
@@ -1128,16 +1169,18 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
                        PMD_LOG(ERR, "Failed to probe %s", name);
                        return -1;
                }
-               /* TODO: request info from primary to set up Rx and Tx */
-               eth_dev->dev_ops = &ops;
-               eth_dev->device = &dev->device;
-               rte_eth_dev_probing_finish(eth_dev);
-               return 0;
-       }
 
-       kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
-       if (kvlist == NULL)
-               return -1;
+               internal = eth_dev->data->dev_private;
+
+               kvlist = rte_kvargs_parse(internal->devargs, valid_arguments);
+               if (kvlist == NULL)
+                       return -1;
+       } else {
+               kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
+                               valid_arguments);
+               if (kvlist == NULL)
+                       return -1;
+       }
 
        /*
         * If iface argument is passed we open the NICs and use them for
@@ -1202,6 +1245,45 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
                goto free_kvlist;
 
 create_eth:
+       if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+               struct pmd_process_private *pp;
+               unsigned int i;
+
+               internal = eth_dev->data->dev_private;
+                       pp = (struct pmd_process_private *)
+                               rte_zmalloc(NULL,
+                                       sizeof(struct pmd_process_private),
+                                       RTE_CACHE_LINE_SIZE);
+
+               if (pp == NULL) {
+                       PMD_LOG(ERR,
+                               "Failed to allocate memory for process private");
+                       return -1;
+               }
+
+               eth_dev->dev_ops = &ops;
+               eth_dev->device = &dev->device;
+
+               /* setup process private */
+               for (i = 0; i < pcaps.num_of_queue; i++)
+                       pp->rx_pcap[i] = pcaps.queue[i].pcap;
+
+               for (i = 0; i < dumpers.num_of_queue; i++) {
+                       pp->tx_dumper[i] = dumpers.queue[i].dumper;
+                       pp->tx_pcap[i] = dumpers.queue[i].pcap;
+               }
+
+               eth_dev->process_private = pp;
+               eth_dev->rx_pkt_burst = eth_pcap_rx;
+               if (is_tx_pcap)
+                       eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
+               else
+                       eth_dev->tx_pkt_burst = eth_pcap_tx;
+
+               rte_eth_dev_probing_finish(eth_dev);
+               return 0;
+       }
+
        ret = eth_from_pcaps(dev, &pcaps, pcaps.num_of_queue, &dumpers,
                dumpers.num_of_queue, single_iface, is_tx_pcap);
 
@@ -1235,6 +1317,7 @@ pmd_pcap_remove(struct rte_vdev_device *dev)
                        eth_dev->data->mac_addrs = NULL;
        }
 
+       rte_free(eth_dev->process_private);
        rte_eth_dev_release_port(eth_dev);
 
        return 0;
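Taken together, the pcap changes revolve around one constraint: pcap_t and pcap_dumper_t handles are only valid inside the process that opened them. They therefore move out of the shared dev_private area into eth_dev->process_private, the queues keep just (port_id, queue_id) so each burst function can find its own process's handle, and the devargs string is stashed so a secondary process can re-open the pcaps itself. The allocation step, reduced to its core (field names follow the diff, error logging trimmed):

    #include <rte_ethdev.h>
    #include <rte_malloc.h>

    #define MAX_QUEUES 16

    struct pmd_process_private {
            void *rx_pcap[MAX_QUEUES];   /* pcap_t * in the real driver */
            void *tx_pcap[MAX_QUEUES];
            void *tx_dumper[MAX_QUEUES]; /* pcap_dumper_t * */
    };

    static int
    attach_process_private(struct rte_eth_dev *eth_dev)
    {
            struct pmd_process_private *pp;

            pp = rte_zmalloc(NULL, sizeof(*pp), RTE_CACHE_LINE_SIZE);
            if (pp == NULL)
                    return -1;
            /* primary and secondary each run this, so every process owns
             * a private copy holding handles it opened itself */
            eth_dev->process_private = pp;
            return 0;
    }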
index 4073a49..428ca46 100644 (file)
@@ -266,8 +266,6 @@ mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
        hdr->tx_offload = pkt->tx_offload;
        hdr->hash = pkt->hash;
 
-       hdr->ol_flags = pkt->ol_flags;
-
        __rte_mbuf_sanity_check(hdr, 1);
        return hdr;
 }
index cbb5e49..f08babd 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <stdio.h>
 #include <stdlib.h>
+#include <string.h>
 #include <unistd.h>
 #include <stdbool.h>
 #include <assert.h>
@@ -442,6 +443,9 @@ free_resource(void)
                struct lcore_option *lo = &options.los[i];
                struct vhost_crypto_info *info = options.infos[i];
 
+               if (!info)
+                       continue;
+
                rte_mempool_free(info->cop_pool);
                rte_mempool_free(info->sess_pool);
 
@@ -493,6 +497,19 @@ main(int argc, char *argv[])
                info->nb_vids = lo->nb_sockets;
 
                rte_cryptodev_info_get(info->cid, &dev_info);
+               if (options.zero_copy == RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE) {
+#define VHOST_CRYPTO_CDEV_NAME_AESNI_MB_PMD    crypto_aesni_mb
+#define VHOST_CRYPTO_CDEV_NAME_AESNI_GCM_PMD   crypto_aesni_gcm
+                       if (strstr(dev_info.driver_name,
+                               RTE_STR(VHOST_CRYPTO_CDEV_NAME_AESNI_MB_PMD)) ||
+                               strstr(dev_info.driver_name,
+                               RTE_STR(VHOST_CRYPTO_CDEV_NAME_AESNI_GCM_PMD)))
+                       RTE_LOG(ERR, USER1, "Cannot enable zero-copy in %s\n",
+                                       dev_info.driver_name);
+                       ret = -EPERM;
+                       goto error_exit;
+               }
+
                if (dev_info.max_nb_queue_pairs < info->qid + 1) {
                        RTE_LOG(ERR, USER1, "Number of queues cannot over %u",
                                        dev_info.max_nb_queue_pairs);
index 1d41ea1..3712719 100644 (file)
@@ -77,7 +77,7 @@ eal_memalloc_is_contig(const struct rte_memseg_list *msl, void *start,
        const struct rte_memseg *ms;
 
        /* for IOVA_VA, it's always contiguous */
-       if (rte_eal_iova_mode() == RTE_IOVA_VA)
+       if (rte_eal_iova_mode() == RTE_IOVA_VA && !msl->external)
                return true;
 
        /* for legacy memory, it's always contiguous */
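The one-line memalloc change narrows an assumption: IOVA-as-VA implies IOVA contiguity only for memory the EAL mapped itself; externally registered segment lists carry no such guarantee and must fall through to the explicit per-memseg walk. The shape of the guard, with simplified stand-in types:

    #include <stdbool.h>

    enum iova_mode { IOVA_PA, IOVA_VA };

    struct memseg_list {
            enum iova_mode mode;
            bool external;   /* user-registered memory, not EAL-mapped */
    };

    /* the fast "always contiguous" answer is valid only for EAL memory */
    static bool
    contig_fast_path(const struct memseg_list *msl)
    {
            return msl->mode == IOVA_VA && !msl->external;
    }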
index b3e8ae5..6e0331f 100644 (file)
 int
 eal_create_runtime_dir(void);
 
-/* returns runtime dir */
-const char *
-rte_eal_get_runtime_dir(void);
-
 #define RUNTIME_CONFIG_FNAME "config"
 static inline const char *
 eal_runtime_config_path(void)
index fc26e97..45b8943 100644 (file)
@@ -49,7 +49,7 @@ extern "C" {
  *   0-15 = release candidates
  *   16   = release
  */
-#define RTE_VER_RELEASE 3
+#define RTE_VER_RELEASE 4
 
 /**
  * Macro to compute a version number usable for comparisons
index 1a74660..9d3dcb6 100644 (file)
@@ -316,13 +316,15 @@ remove_elem(struct malloc_elem *elem)
 static int
 next_elem_is_adjacent(struct malloc_elem *elem)
 {
-       return elem->next == RTE_PTR_ADD(elem, elem->size);
+       return elem->next == RTE_PTR_ADD(elem, elem->size) &&
+                       elem->next->msl == elem->msl;
 }
 
 static int
 prev_elem_is_adjacent(struct malloc_elem *elem)
 {
-       return elem == RTE_PTR_ADD(elem->prev, elem->prev->size);
+       return elem == RTE_PTR_ADD(elem->prev, elem->prev->size) &&
+                       elem->prev->msl == elem->msl;
 }
 
 /*
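The malloc_elem fix tightens what "adjacent" means: two elements can touch in virtual address space yet belong to different memseg lists (for instance an external heap mapped next to an internal one), and such neighbours must never be merged. A reduced model of the new predicate:

    #include <stdint.h>
    #include <stddef.h>

    struct msl;                    /* opaque memseg list */

    struct elem {
            struct elem *next;     /* caller guarantees non-NULL here */
            size_t size;
            const struct msl *msl; /* list this element was carved from */
    };

    /* adjacency = touching addresses AND the same memseg list */
    static int
    next_is_adjacent(const struct elem *e)
    {
            return e->next == (struct elem *)((uintptr_t)e + e->size) &&
                            e->next->msl == e->msl;
    }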
index d589c69..2830c86 100644 (file)
@@ -258,7 +258,7 @@ dev_uev_handler(__rte_unused void *param)
                        if (bus == NULL) {
                                RTE_LOG(ERR, EAL, "Cannot find bus (%s)\n",
                                        busname);
-                               return;
+                               goto failure_handle_err;
                        }
 
                        dev = bus->find_device(NULL, cmp_dev_name,
@@ -266,19 +266,23 @@ dev_uev_handler(__rte_unused void *param)
                        if (dev == NULL) {
                                RTE_LOG(ERR, EAL, "Cannot find device (%s) on "
                                        "bus (%s)\n", uevent.devname, busname);
-                               return;
+                               goto failure_handle_err;
                        }
 
                        ret = bus->hot_unplug_handler(dev);
-                       rte_spinlock_unlock(&failure_handle_lock);
                        if (ret) {
                                RTE_LOG(ERR, EAL, "Can not handle hot-unplug "
                                        "for device (%s)\n", dev->name);
-                               return;
                        }
+                       rte_spinlock_unlock(&failure_handle_lock);
                }
                rte_dev_event_callback_process(uevent.devname, uevent.type);
        }
+
+       return;
+
+failure_handle_err:
+       rte_spinlock_unlock(&failure_handle_lock);
 }
 
 int __rte_experimental
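The eal_dev hunks turn the early returns in the hot-unplug branch of dev_uev_handler() into a shared failure label, so failure_handle_lock is released on every exit path instead of leaking whenever the bus or device lookup fails. The resulting lock discipline, boiled down to its skeleton:

    #include <rte_spinlock.h>

    static rte_spinlock_t failure_lock = RTE_SPINLOCK_INITIALIZER;

    static void
    uev_handle(int bus_found, int dev_found)
    {
            rte_spinlock_lock(&failure_lock);
            if (!bus_found)
                    goto err;   /* was a bare return that kept the lock */
            if (!dev_found)
                    goto err;
            /* ... hot-unplug handling ... */
            rte_spinlock_unlock(&failure_lock);
            return;
    err:
            rte_spinlock_unlock(&failure_lock);
    }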
index 48b9c73..7849395 100644 (file)
@@ -753,9 +753,6 @@ mapped:
        munmap(addr, alloc_sz);
 unmapped:
        flags = MAP_FIXED;
-#ifdef RTE_ARCH_PPC_64
-       flags |= MAP_HUGETLB;
-#endif
        new_addr = eal_get_virtual_area(addr, &alloc_sz, alloc_sz, 0, flags);
        if (new_addr != addr) {
                if (new_addr != NULL)
index 48b23ce..6f94621 100644 (file)
@@ -847,10 +847,6 @@ alloc_va_space(struct rte_memseg_list *msl)
        void *addr;
        int flags = 0;
 
-#ifdef RTE_ARCH_PPC_64
-       flags |= MAP_HUGETLB;
-#endif
-
        page_sz = msl->page_sz;
        mem_sz = page_sz * msl->memseg_arr.len;
 
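Both EAL memory hunks delete the same PPC64-only MAP_HUGETLB from calls that merely reserve virtual address space via eal_get_virtual_area(). The diff does not say why the flag had to go on Power, so only the generic pattern is shown here: a reservation of this kind is an ordinary anonymous mapping with nothing backed yet, and needs no hugepage flag:

    #include <stddef.h>
    #include <sys/mman.h>

    static void *
    reserve_va(void *hint, size_t len)
    {
            void *va = mmap(hint, len, PROT_READ,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            return va == MAP_FAILED ? NULL : va;
    }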
index a780e2f..e6e5cfd 100644 (file)
@@ -692,7 +692,8 @@ rte_efd_create(const char *name, uint32_t max_num_rules, uint32_t key_len,
                        offline_cpu_socket, 0);
        if (r == NULL) {
                RTE_LOG(ERR, EFD, "memory allocation failed\n");
-               goto error_unlock_exit;
+               rte_efd_free(table);
+               return NULL;
        }
 
        /* Populate free slots ring. Entry zero is reserved for key misses. */
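The efd hunk stops jumping to error_unlock_exit when the free-slot ring cannot be allocated, freeing the table and returning directly instead. The hazard this avoids, as read from the control flow (the diff states no rationale): the shared error label also drops a lock, but this late failure occurs after that lock has already been released, so taking the label would unlock twice. In miniature:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    void *
    create_table(void)
    {
            void *table, *ring;

            table = malloc(64);
            if (table == NULL)
                    return NULL;

            pthread_mutex_lock(&list_lock);
            /* ... failures in here may "goto err_unlock" safely ... */
            pthread_mutex_unlock(&list_lock);

            ring = malloc(64);
            if (ring == NULL) {
                    /* lock already released: jumping to a label that
                     * unlocks would release a mutex we do not hold */
                    free(table);
                    return NULL;
            }
            free(ring); /* sketch only; a real table would keep it */
            return table;
    }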
index 8daebea..bd92343 100644 (file)
@@ -10,7 +10,6 @@ LIB = librte_security.a
 LIBABIVER := 1
 
 # build flags
-CFLAGS += -DALLOW_EXPERIMENTAL_API
 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)
 LDLIBS += -lrte_eal -lrte_mempool
index 4c85894..532953f 100644 (file)
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2017 Intel Corporation
 
-allow_experimental_apis = true
 sources = files('rte_security.c')
 headers = files('rte_security.h', 'rte_security_driver.h')
 deps += ['mempool', 'cryptodev']
index c6355de..bc81ce1 100644 (file)
@@ -10,7 +10,7 @@
 #include "rte_security_driver.h"
 
 struct rte_security_session *
-__rte_experimental rte_security_session_create(struct rte_security_ctx *instance,
+rte_security_session_create(struct rte_security_ctx *instance,
                            struct rte_security_session_conf *conf,
                            struct rte_mempool *mp)
 {
@@ -33,7 +33,7 @@ __rte_experimental rte_security_session_create(struct rte_security_ctx *instance
        return sess;
 }
 
-int __rte_experimental
+int
 rte_security_session_update(struct rte_security_ctx *instance,
                            struct rte_security_session *sess,
                            struct rte_security_session_conf *conf)
@@ -42,14 +42,14 @@ rte_security_session_update(struct rte_security_ctx *instance,
        return instance->ops->session_update(instance->device, sess, conf);
 }
 
-unsigned int __rte_experimental
+unsigned int
 rte_security_session_get_size(struct rte_security_ctx *instance)
 {
        RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->session_get_size, 0);
        return instance->ops->session_get_size(instance->device);
 }
 
-int __rte_experimental
+int
 rte_security_session_stats_get(struct rte_security_ctx *instance,
                               struct rte_security_session *sess,
                               struct rte_security_stats *stats)
@@ -58,7 +58,7 @@ rte_security_session_stats_get(struct rte_security_ctx *instance,
        return instance->ops->session_stats_get(instance->device, sess, stats);
 }
 
-int __rte_experimental
+int
 rte_security_session_destroy(struct rte_security_ctx *instance,
                             struct rte_security_session *sess)
 {
@@ -76,7 +76,7 @@ rte_security_session_destroy(struct rte_security_ctx *instance,
        return ret;
 }
 
-int __rte_experimental
+int
 rte_security_set_pkt_metadata(struct rte_security_ctx *instance,
                              struct rte_security_session *sess,
                              struct rte_mbuf *m, void *params)
@@ -86,7 +86,7 @@ rte_security_set_pkt_metadata(struct rte_security_ctx *instance,
                                               sess, m, params);
 }
 
-void * __rte_experimental
+void *
 rte_security_get_userdata(struct rte_security_ctx *instance, uint64_t md)
 {
        void *userdata = NULL;
@@ -98,14 +98,14 @@ rte_security_get_userdata(struct rte_security_ctx *instance, uint64_t md)
        return userdata;
 }
 
-const struct rte_security_capability * __rte_experimental
+const struct rte_security_capability *
 rte_security_capabilities_get(struct rte_security_ctx *instance)
 {
        RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->capabilities_get, NULL);
        return instance->ops->capabilities_get(instance->device);
 }
 
-const struct rte_security_capability * __rte_experimental
+const struct rte_security_capability *
 rte_security_capability_get(struct rte_security_ctx *instance,
                            struct rte_security_capability_idx *idx)
 {
index 1431b4d..e07b132 100644 (file)
@@ -8,7 +8,6 @@
 
 /**
  * @file rte_security.h
- * @b EXPERIMENTAL: this API may change without prior notice
  *
  * RTE Security Common Definitions
  *
@@ -330,7 +329,7 @@ struct rte_security_session {
  *  - On success, pointer to session
  *  - On failure, NULL
  */
-struct rte_security_session * __rte_experimental
+struct rte_security_session *
 rte_security_session_create(struct rte_security_ctx *instance,
                            struct rte_security_session_conf *conf,
                            struct rte_mempool *mp);
@@ -345,7 +344,7 @@ rte_security_session_create(struct rte_security_ctx *instance,
  *  - On success returns 0
  *  - On failure return errno
  */
-int __rte_experimental
+int
 rte_security_session_update(struct rte_security_ctx *instance,
                            struct rte_security_session *sess,
                            struct rte_security_session_conf *conf);
@@ -359,7 +358,7 @@ rte_security_session_update(struct rte_security_ctx *instance,
  *   - Size of the private data, if successful
  *   - 0 if device is invalid or does not support the operation.
  */
-unsigned int __rte_experimental
+unsigned int
 rte_security_session_get_size(struct rte_security_ctx *instance);
 
 /**
@@ -374,7 +373,7 @@ rte_security_session_get_size(struct rte_security_ctx *instance);
  *  - -EINVAL if session is NULL.
  *  - -EBUSY if not all device private data has been freed.
  */
-int __rte_experimental
+int
 rte_security_session_destroy(struct rte_security_ctx *instance,
                             struct rte_security_session *sess);
 
@@ -391,7 +390,7 @@ rte_security_session_destroy(struct rte_security_ctx *instance,
  *  - On success, zero.
  *  - On failure, a negative value.
  */
-int __rte_experimental
+int
 rte_security_set_pkt_metadata(struct rte_security_ctx *instance,
                              struct rte_security_session *sess,
                              struct rte_mbuf *mb, void *params);
@@ -413,7 +412,7 @@ rte_security_set_pkt_metadata(struct rte_security_ctx *instance,
  *  - On success, userdata
  *  - On failure, NULL
  */
-void * __rte_experimental
+void *
 rte_security_get_userdata(struct rte_security_ctx *instance, uint64_t md);
 
 /**
@@ -422,7 +421,7 @@ rte_security_get_userdata(struct rte_security_ctx *instance, uint64_t md);
  * @param      sym_op  crypto operation
  * @param      sess    security session
  */
-static inline int __rte_experimental
+static inline int
 __rte_security_attach_session(struct rte_crypto_sym_op *sym_op,
                              struct rte_security_session *sess)
 {
@@ -431,13 +430,13 @@ __rte_security_attach_session(struct rte_crypto_sym_op *sym_op,
        return 0;
 }
 
-static inline void * __rte_experimental
+static inline void *
 get_sec_session_private_data(const struct rte_security_session *sess)
 {
        return sess->sess_private_data;
 }
 
-static inline void __rte_experimental
+static inline void
 set_sec_session_private_data(struct rte_security_session *sess,
                             void *private_data)
 {
@@ -453,7 +452,7 @@ set_sec_session_private_data(struct rte_security_session *sess,
  * @param      op      crypto operation
  * @param      sess    security session
  */
-static inline int __rte_experimental
+static inline int
 rte_security_attach_session(struct rte_crypto_op *op,
                            struct rte_security_session *sess)
 {
@@ -500,7 +499,7 @@ struct rte_security_stats {
  *  - On success return 0
  *  - On failure errno
  */
-int __rte_experimental
+int
 rte_security_session_stats_get(struct rte_security_ctx *instance,
                               struct rte_security_session *sess,
                               struct rte_security_stats *stats);
@@ -608,7 +607,7 @@ struct rte_security_capability_idx {
  *   - Returns array of security capabilities.
  *   - Return NULL if no capabilities available.
  */
-const struct rte_security_capability * __rte_experimental
+const struct rte_security_capability *
 rte_security_capabilities_get(struct rte_security_ctx *instance);
 
 /**
@@ -622,7 +621,7 @@ rte_security_capabilities_get(struct rte_security_ctx *instance);
  *     index criteria.
  *   - Return NULL if the capability not matched on security instance.
  */
-const struct rte_security_capability * __rte_experimental
+const struct rte_security_capability *
 rte_security_capability_get(struct rte_security_ctx *instance,
                            struct rte_security_capability_idx *idx);
 
index 42f42ff..1b561f8 100644 (file)
@@ -8,7 +8,6 @@
 
 /**
  * @file rte_security_driver.h
- * @b EXPERIMENTAL: this API may change without prior notice
  *
  * RTE Security Common Definitions
  *
index 5a1c8ae..a77ca4b 100644 (file)
@@ -1,4 +1,4 @@
-EXPERIMENTAL {
+DPDK_18.11 {
        global:
 
        rte_security_attach_session;
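The librte_security changes are one logical operation: promoting the library from experimental to stable for 18.11. Every __rte_experimental tag comes off the prototypes and definitions, the ALLOW_EXPERIMENTAL_API / allow_experimental_apis build switches are dropped because nothing experimental remains in use, and the linker map node is renamed so the symbols join the versioned ABI. After the rename the map has this shape (symbol list abridged to the one entry visible in this diff):

    DPDK_18.11 {
            global:

            rte_security_attach_session;
            /* ...the remaining rte_security_* symbols, unchanged... */

            local: *;
    };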
index a72237e..1547e91 100644 (file)
@@ -2,7 +2,7 @@
 # Copyright(c) 2017 Intel Corporation
 
 project('DPDK', 'C',
-       version: '18.11.0-rc3',
+       version: '18.11.0-rc4',
        license: 'BSD',
        default_options: ['buildtype=release', 'default_library=static'],
        meson_version: '>= 0.41'
index 3a99076..2acab9d 100644 (file)
@@ -951,10 +951,15 @@ test_file_prefix(void)
         * 2. try to run secondary process without a corresponding primary process
         * (while failing to run, it will also remove any unused hugepage files)
         * 3. check if current process hugefiles are still in place and are locked
-        * 4. run a primary process with memtest1 prefix
-        * 5. check if memtest1 hugefiles are created
-        * 6. run a primary process with memtest2 prefix
-        * 7. check that only memtest2 hugefiles are present in the hugedir
+        * 4. run a primary process with memtest1 prefix in default and legacy
+        *    mem mode
+        * 5. check if memtest1 hugefiles are created in case of legacy mem
+        *    mode, and deleted in case of default mem mode
+        * 6. run a primary process with memtest2 prefix in default and legacy
+        *    mem modes
+        * 7. check that memtest2 hugefiles are present in the hugedir after a
+        *    run in legacy mode, and not present at all after run in default
+        *    mem mode
         */
        char prefix[PATH_MAX] = "";
 
@@ -971,13 +976,23 @@ test_file_prefix(void)
        const char *argv0[] = {prgname, mp_flag, "-c", "1", "-n", "2", "-m", DEFAULT_MEM_SIZE,
                        "--file-prefix=" memtest };
 
-       /* primary process with memtest1 */
-       const char *argv1[] = {prgname, "-c", "1", "-n", "2", "-m", DEFAULT_MEM_SIZE,
-                               "--file-prefix=" memtest1 };
+       /* primary process with memtest1 and default mem mode */
+       const char *argv1[] = {prgname, "-c", "1", "-n", "2", "-m",
+                       DEFAULT_MEM_SIZE, "--file-prefix=" memtest1 };
 
-       /* primary process with memtest2 */
-       const char *argv2[] = {prgname, "-c", "1", "-n", "2", "-m", DEFAULT_MEM_SIZE,
-                               "--file-prefix=" memtest2 };
+       /* primary process with memtest1 and legacy mem mode */
+       const char *argv2[] = {prgname, "-c", "1", "-n", "2", "-m",
+                       DEFAULT_MEM_SIZE, "--file-prefix=" memtest1,
+                       "--legacy-mem" };
+
+       /* primary process with memtest2 and legacy mem mode */
+       const char *argv3[] = {prgname, "-c", "1", "-n", "2", "-m",
+                       DEFAULT_MEM_SIZE, "--file-prefix=" memtest2,
+                       "--legacy-mem" };
+
+       /* primary process with memtest2 and default mem mode */
+       const char *argv4[] = {prgname, "-c", "1", "-n", "2", "-m",
+                       DEFAULT_MEM_SIZE, "--file-prefix=" memtest2 };
 
        /* check if files for current prefix are present */
        if (process_hugefiles(prefix, HUGEPAGE_CHECK_EXISTS) != 1) {
@@ -1024,31 +1039,78 @@ test_file_prefix(void)
                return -1;
        }
 
+       /* we're running this process in default memory mode, which means it
+        * should clean up after itself on exit and leave no hugepages behind.
+        */
        if (launch_proc(argv1) != 0) {
-               printf("Error - failed to run with --file-prefix=%s\n", memtest);
+               printf("Error - failed to run with --file-prefix=%s\n",
+                               memtest1);
                return -1;
        }
 
        /* check if memtest1_map0 is present */
-       if (process_hugefiles(memtest1, HUGEPAGE_CHECK_EXISTS) != 1) {
-               printf("Error - hugepage files for %s were not created!\n", memtest1);
+       if (process_hugefiles(memtest1, HUGEPAGE_CHECK_EXISTS) != 0) {
+               printf("Error - hugepage files for %s were not deleted!\n",
+                               memtest1);
                return -1;
        }
 
+       /* now, we're running a process under the same prefix, but with legacy
+        * mem mode - this should leave behind hugepage files.
+        */
        if (launch_proc(argv2) != 0) {
-               printf("Error - failed to run with --file-prefix=%s\n", memtest2);
+               printf("Error - failed to run with --file-prefix=%s\n",
+                               memtest1);
+               return -1;
+       }
+
+       /* check if memtest1_map0 is present */
+       if (process_hugefiles(memtest1, HUGEPAGE_CHECK_EXISTS) != 1) {
+               printf("Error - hugepage files for %s were not created!\n",
+                               memtest1);
+               return -1;
+       }
+
+       if (launch_proc(argv3) != 0) {
+               printf("Error - failed to run with --file-prefix=%s\n",
+                               memtest2);
                return -1;
        }
 
        /* check if hugefiles for memtest2 are present */
        if (process_hugefiles(memtest2, HUGEPAGE_CHECK_EXISTS) != 1) {
-               printf("Error - hugepage files for %s were not created!\n", memtest2);
+               printf("Error - hugepage files for %s were not created!\n",
+                               memtest2);
+               return -1;
+       }
+
+       /* check if hugefiles for memtest1 are present */
+       if (process_hugefiles(memtest1, HUGEPAGE_CHECK_EXISTS) != 0) {
+               printf("Error - hugepage files for %s were not deleted!\n",
+                               memtest1);
+               return -1;
+       }
+
+       /* this process will run in default mem mode, so it should not leave any
+        * hugepage files behind.
+        */
+       if (launch_proc(argv4) != 0) {
+               printf("Error - failed to run with --file-prefix=%s\n",
+                               memtest2);
+               return -1;
+       }
+
+       /* check if hugefiles for memtest2 are present */
+       if (process_hugefiles(memtest2, HUGEPAGE_CHECK_EXISTS) != 0) {
+               printf("Error - hugepage files for %s were not deleted!\n",
+                               memtest2);
                return -1;
        }
 
        /* check if hugefiles for memtest1 are present */
        if (process_hugefiles(memtest1, HUGEPAGE_CHECK_EXISTS) != 0) {
-               printf("Error - hugepage files for %s were not deleted!\n", memtest1);
+               printf("Error - hugepage files for %s were not deleted!\n",
+                               memtest1);
                return -1;
        }
 
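What this test now relies on: in the default (dynamic) memory mode, EAL removes its hugepage map files when the process exits, while --legacy-mem keeps the old static-allocation behaviour of leaving the per-prefix files in the hugetlbfs mount. A minimal sketch of the distinction (illustrative only, not part of the patch; the -m value and file locations are assumptions):

/* sketch: same app, two memory modes; only the EAL flags differ */
#include <rte_eal.h>

int
main(int argc, char **argv)
{
	/* default mode:  app --file-prefix=memtest1 -m 2
	 *   -> the hugepage files created under --file-prefix=memtest1
	 *      are gone after exit
	 * legacy mode:   app --file-prefix=memtest1 -m 2 --legacy-mem
	 *   -> the map files stay behind until removed manually
	 */
	if (rte_eal_init(argc, argv) < 0)
		return 1;
	rte_eal_cleanup();	/* default mode unlinks its hugepage files */
	return 0;
}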
diff --git a/test/test/test_interrupts.c b/test/test/test_interrupts.c
index dc19175..4e82e9a 100644 (file)
@@ -424,7 +424,7 @@ test_interrupt(void)
 
        printf("Check valid alarm interrupt full path\n");
        if (test_interrupt_full_path_check(
-               TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT) < 0) {
+               TEST_INTERRUPT_HANDLE_VALID_ALARM) < 0) {
                printf("failure occurred during checking valid alarm "
                                                "interrupt full path\n");
                goto out;
                printf("failure occurred during checking valid alarm "
                                                "interrupt full path\n");
                goto out;
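For context, the constant being fixed selects which handle type the full-path check installs; the valid-alarm case corresponds to the RTE_INTR_HANDLE_ALARM handle type backing EAL's alarm API. A small illustrative sketch of that API (not part of the patch; the 10 ms period is arbitrary):

#include <stdio.h>
#include <rte_alarm.h>

static void
alarm_cb(void *arg)
{
	/* invoked from the EAL interrupt thread when the alarm expires */
	printf("alarm fired, arg=%p\n", arg);
}

/* arm a one-shot callback 10000 us from now; it can be revoked early
 * with rte_eal_alarm_cancel(alarm_cb, NULL) */
int
set_test_alarm(void)
{
	return rte_eal_alarm_set(10 * 1000, alarm_cb, NULL);
}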
diff --git a/test/test/test_pmd_ring_perf.c b/test/test/test_pmd_ring_perf.c
index ad5004a..6318da1 100644 (file)
@@ -10,6 +10,7 @@
 #include <rte_launch.h>
 #include <rte_ethdev.h>
 #include <rte_eth_ring.h>
 #include <rte_launch.h>
 #include <rte_ethdev.h>
 #include <rte_eth_ring.h>
+#include <rte_bus_vdev.h>
 
 #include "test.h"
 
@@ -135,6 +136,8 @@ test_bulk_enqueue_dequeue(void)
 static int
 test_ring_pmd_perf(void)
 {
+       char name[RTE_ETH_NAME_MAX_LEN];
+
        r = rte_ring_create(RING_NAME, RING_SIZE, rte_socket_id(),
                        RING_F_SP_ENQ|RING_F_SC_DEQ);
        if (r == NULL && (r = rte_ring_lookup(RING_NAME)) == NULL)
@@ -151,6 +154,11 @@ test_ring_pmd_perf(void)
        printf("\n### Testing using a single lcore ###\n");
        test_bulk_enqueue_dequeue();
 
        printf("\n### Testing using a single lcore ###\n");
        test_bulk_enqueue_dequeue();
 
+       /* release port and ring resources */
+       rte_eth_dev_stop(ring_ethdev_port);
+       rte_eth_dev_get_name_by_port(ring_ethdev_port, name);
+       rte_vdev_uninit(name);
+       rte_ring_free(r);
        return 0;
 }
 
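The five added lines are the teardown half of a pairing; a hedged sketch of the full round trip, assuming the port in this test originates from rte_eth_from_ring() (which registers a net_ring vdev under the hood):

#include <rte_ethdev.h>
#include <rte_eth_ring.h>
#include <rte_bus_vdev.h>
#include <rte_ring.h>
#include <rte_lcore.h>

static int
ring_port_roundtrip(void)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct rte_ring *r;
	int port;

	r = rte_ring_create("sketch_ring", 1024, rte_socket_id(),
			RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (r == NULL)
		return -1;
	port = rte_eth_from_ring(r);	/* creates a "net_ring" vdev */
	if (port < 0) {
		rte_ring_free(r);
		return -1;
	}
	/* ... exercise the port ... */
	rte_eth_dev_stop(port);
	rte_eth_dev_get_name_by_port(port, name);	/* recover vdev name */
	rte_vdev_uninit(name);		/* detaches the ethdev */
	rte_ring_free(r);
	return 0;
}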
diff --git a/test/test/test_power_kvm_vm.c b/test/test/test_power_kvm_vm.c
index bce706d..785cd04 100644 (file)
@@ -101,7 +101,7 @@ test_power_kvm_vm(void)
                                "Power management environment\n",
                                TEST_POWER_VM_LCORE_ID);
                rte_power_unset_env();
                                "Power management environment\n",
                                TEST_POWER_VM_LCORE_ID);
                rte_power_unset_env();
-               return -1;
+               return TEST_SKIPPED;
        }
 
        /* Test initialisation of previously initialised lcore */
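Returning TEST_SKIPPED matters to the autotest harness: an environment-dependent test now reports as skipped rather than failed when no KVM power-management channel is available. A condensed sketch of the pattern (TEST_SKIPPED comes from the harness's test.h):

#include <rte_power.h>
#include "test.h"	/* provides TEST_SKIPPED */

static int
test_power_kvm_vm_sketch(void)
{
	/* no virtio-serial endpoint outside a KVM guest -> skip, not fail */
	if (rte_power_set_env(PM_ENV_KVM_VM) != 0) {
		rte_power_unset_env();
		return TEST_SKIPPED;
	}
	/* ... real assertions would follow here ... */
	rte_power_unset_env();
	return 0;
}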
diff --git a/usertools/dpdk-devbind.py b/usertools/dpdk-devbind.py
index 7d56463..40dc28a 100755 (executable)
@@ -655,6 +655,13 @@ def do_arg_actions():
 
 def main():
     '''program main function'''
+    # check if lspci is installed, suppress any output
+    with open(os.devnull, 'w') as devnull:
+        ret = subprocess.call(['which', 'lspci'],
+                              stdout=devnull, stderr=devnull)
+        if ret != 0:
+            print("'lspci' not found - please install 'pciutils'")
+            sys.exit(1)
     parse_args()
     check_modules()
     clear_data()