F: drivers/net/i40e/i40e_rxtx_vec_neon.c
F: drivers/net/virtio/virtio_rxtx_simple_neon.c
-IBM POWER
+IBM POWER (alpha)
M: Chao Zhu <chaozhu@linux.vnet.ibm.com>
F: lib/librte_eal/common/arch/ppc_64/
F: lib/librte_eal/common/include/arch/ppc_64/
F: test/test/test_cryptodev*
F: examples/l2fwd-crypto/
-Security API - EXPERIMENTAL
+Security API
M: Akhil Goyal <akhil.goyal@nxp.com>
M: Declan Doherty <declan.doherty@intel.com>
T: git://dpdk.org/next/dpdk-next-crypto
/* params for packet dumping */
enum pdump_by dump_by_type;
- int rx_vdev_id;
- int tx_vdev_id;
+ uint16_t rx_vdev_id;
+ uint16_t tx_vdev_id;
enum pcap_stream rx_vdev_stream_type;
enum pcap_stream tx_vdev_stream_type;
bool single_pdump_dev;
&parse_uint_value, &v);
if (ret < 0)
goto free_kvlist;
- pt->port = (uint8_t) v.val;
+ pt->port = (uint16_t) v.val;
pt->dump_by_type = PORT_ID;
} else if (cnt2 == 1) {
ret = rte_kvargs_process(kvlist, PDUMP_PCI_ARG,
}
static inline void
-pdump_rxtx(struct rte_ring *ring, uint8_t vdev_id, struct pdump_stats *stats)
+pdump_rxtx(struct rte_ring *ring, uint16_t vdev_id, struct pdump_stats *stats)
{
/* write input packets of port to vdev for pdump */
struct rte_mbuf *rxtx_bufs[BURST_SIZE];
}
static void
-free_ring_data(struct rte_ring *ring, uint8_t vdev_id,
+free_ring_data(struct rte_ring *ring, uint16_t vdev_id,
struct pdump_stats *stats)
{
while (rte_ring_count(ring))
}
if (port_id_is_invalid(port_id, ENABLED_WARN))
- return;
+ goto free_table;
/* Update Meter DSCP Table*/
ret = rte_mtr_meter_dscp_table_update(port_id, mtr_id,
dscp_table, &error);
- if (ret != 0) {
+ if (ret != 0)
print_err_msg(&error);
- return;
- }
+
+free_table:
free(dscp_table);
}
static int eth_event_callback(portid_t port_id,
enum rte_eth_event_type type,
void *param, void *ret_param);
-static void eth_dev_event_callback(const char *device_name,
+static void dev_event_callback(const char *device_name,
enum rte_dev_event_type type,
void *param);
}
ret = rte_dev_event_callback_unregister(NULL,
- eth_dev_event_callback, NULL);
+ dev_event_callback, NULL);
if (ret < 0) {
RTE_LOG(ERR, EAL,
"fail to unregister device event callback.\n");
}
}
+/*
+ * This callback is for remove a port for a device. It has limitation because
+ * it is not for multiple port removal for a device.
+ * TODO: the device detach invoke will plan to be removed from user side to
+ * eal. And convert all PMDs to free port resources on ether device closing.
+ */
static void
-rmv_event_callback(void *arg)
+rmv_port_callback(void *arg)
{
int need_to_start = 0;
int org_no_link_check = no_link_check;
if (port_id_is_invalid(port_id, DISABLED_WARN))
break;
if (rte_eal_alarm_set(100000,
- rmv_event_callback, (void *)(intptr_t)port_id))
+ rmv_port_callback, (void *)(intptr_t)port_id))
fprintf(stderr, "Could not set up deferred device removal\n");
break;
default:
/* This function is used by the interrupt thread */
static void
-eth_dev_event_callback(const char *device_name, enum rte_dev_event_type type,
+dev_event_callback(const char *device_name, enum rte_dev_event_type type,
__rte_unused void *arg)
{
uint16_t port_id;
switch (type) {
case RTE_DEV_EVENT_REMOVE:
- RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
+ RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
device_name);
ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
if (ret) {
device_name);
return;
}
- rmv_event_callback((void *)(intptr_t)port_id);
+ /*
+ * Because the user's callback is invoked in eal interrupt
+ * callback, the interrupt callback need to be finished before
+ * it can be unregistered when detaching device. So finish
+ * callback soon and use a deferred removal to detach device
+ * is need. It is a workaround, once the device detaching be
+ * moved into the eal in the future, the deferred removal could
+ * be deleted.
+ */
+ if (rte_eal_alarm_set(100000,
+ rmv_port_callback, (void *)(intptr_t)port_id))
+ RTE_LOG(ERR, EAL,
+ "Could not set up deferred device removal\n");
break;
case RTE_DEV_EVENT_ADD:
RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
}
ret = rte_dev_event_callback_register(NULL,
- eth_dev_event_callback, NULL);
+ dev_event_callback, NULL);
if (ret) {
RTE_LOG(ERR, EAL,
"fail to register device event callback\n");
dpdk_conf.set('RTE_CACHE_LINE_SIZE', 64)
dpdk_conf.set('RTE_ARCH_ARM', 1)
dpdk_conf.set('RTE_ARCH_ARMv7', 1)
+ # the minimum architecture supported, armv7-a, needs the following,
+ # mk/machine/armv7a/rte.vars.mk sets it too
+ machine_args += '-mfpu=neon'
else
dpdk_conf.set('RTE_CACHE_LINE_SIZE', 128)
dpdk_conf.set('RTE_ARCH_ARM64', 1)
else
machine = get_option('machine')
endif
+
+# machine type 'default' is special, it defaults to the per arch agreed common
+# minimal baseline needed for DPDK.
+# That might not be the most optimized, but the most portable version while
+# still being able to support the CPU features required for DPDK.
+# This can be bumped up by the DPDK project, but it can never be an
+# invariant like 'native'
+if machine == 'default'
+ if host_machine.cpu_family().startswith('x86')
+ # matches the old pre-meson build systems default
+ machine = 'corei7'
+ elif host_machine.cpu_family().startswith('arm')
+ machine = 'armv7-a'
+ elif host_machine.cpu_family().startswith('aarch')
+ # arm64 manages defaults in config/arm/meson.build
+ machine = 'default'
+ elif host_machine.cpu_family().startswith('ppc')
+ machine = 'power8'
+ endif
+endif
+
dpdk_conf.set('RTE_MACHINE', machine)
machine_args = []
-# ppc64 does not support -march=native
-if host_machine.cpu_family().startswith('ppc') and machine == 'native'
+
+# ppc64 does not support -march= at all, use -mcpu and -mtune for that
+if host_machine.cpu_family().startswith('ppc')
machine_args += '-mcpu=' + machine
machine_args += '-mtune=' + machine
else
# does not end in 'map', indicating we have left the map chunk.
# When we hit this, turn off the in_map variable, which
# supresses the subordonate rules below
- /[-+] a\/.*\.^(map)/ {in_map=0}
+ /[-+] a\/.*\.[^map]/ {in_map=0}
# Triggering this rule, which starts a line and ends it
# with a { identifies a versioned section. The section name is
build_map_changes "$patch" "$mapfile"
check_for_rule_violations "$mapfile"
exit_code=$?
-
rm -f "$mapfile"
exit $exit_code
Use ``ninja install`` to install the required DPDK files onto the system.
The install prefix defaults to ``/usr/local`` but can be used as with other
-options above. The environment variable ``DEST_DIR`` can be used to adjust
+options above. The environment variable ``DESTDIR`` can be used to adjust
the root directory for the install, for example when packaging.
With the base install directory, the individual directories for libraries
build:
@mkdir -p $@
+
+NOTE: for --static builds, DPDK needs to be built with Meson >= 0.46 in order to
+fully generate the list of private dependencies. If DPDK is built with an older
+version of Meson, it might be necessary to manually specify dependencies of DPDK
+PMDs/libraries, for example -lmlx5 -lmnl for librte-pmd-mlx5, or the static link
+step might fail.
Supported DPAA2 SoCs
--------------------
-* LS2080A/LS2040A
+* LS2160A
* LS2084A/LS2044A
* LS2088A/LS2048A
* LS1088A/LS1048A
DPAA2_SEC driver has similar pre-requisites as described in :ref:`dpaa2_overview`.
The following dependencies are not part of DPDK and must be installed separately:
-* **NXP Linux SDK**
-
- NXP Linux software development kit (SDK) includes support for the family
- of QorIQ® ARM-Architecture-based system on chip (SoC) processors
- and corresponding boards.
-
- It includes the Linux board support packages (BSPs) for NXP SoCs,
- a fully operational tool chain, kernel and board specific modules.
-
- SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
-
-* **DPDK Extra Scripts**
-
- DPAA2 based resources can be configured easily with the help of ready scripts
- as provided in the DPDK helper repository.
-
- `DPDK Extra Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
+See :doc:`../platform/dpaa2` for setup information
Currently supported by DPDK:
-* NXP SDK **17.08+**.
-* MC Firmware version **10.3.1** and higher.
-* Supported architectures: **arm64 LE**.
+- NXP SDK **18.09+**.
+- MC Firmware version **10.10.0** and higher.
+- Supported architectures: **arm64 LE**.
-* Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
+- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
Pre-Installation Configuration
------------------------------
-------------
DPAA_SEC driver has similar pre-requisites as described in :ref:`dpaa_overview`.
-The following dependencies are not part of DPDK and must be installed separately:
-* **NXP Linux SDK**
+See :doc:`../platform/dpaa` for setup information
- NXP Linux software development kit (SDK) includes support for the family
- of QorIQ® ARM-Architecture-based system on chip (SoC) processors
- and corresponding boards.
- It includes the Linux board support packages (BSPs) for NXP SoCs,
- a fully operational tool chain, kernel and board specific modules.
-
- SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
-
-* **DPDK Extras Scripts**
-
- DPAA based resources can be configured easily with the help of ready scripts
- as provided in the DPDK Extras repository.
-
- `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
-
-Currently supported by DPDK:
-
-* NXP SDK **2.0+**.
-* Supported architectures: **arm64 LE**.
-
-* Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
+- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
Pre-Installation Configuration
------------------------------
* ``RTE_CRYPTO_AEAD_AES_GCM``
-Compilation
------------
-
-The **OCTEON TX** :sup:`®` board must be running the linux kernel based on
-sdk-6.2.0 patch 3. In this, the OCTEON TX crypto PF driver is already built in.
+Config flags
+------------
For compiling the OCTEON TX crypto poll mode driver, please check if the
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO setting is set to `y` in
* ``CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO=y``
-The following are the steps to compile the OCTEON TX crypto poll mode driver:
+Compilation
+-----------
-.. code-block:: console
+The OCTEON TX crypto poll mode driver can be compiled either natively on
+**OCTEON TX** :sup:`®` board or cross-compiled on an x86 based platform.
- cd <dpdk directory>
- make config T=arm64-thunderx-linuxapp-gcc
- make
+Refer :doc:`../platform/octeontx` for details about setting up the platform
+and building DPDK applications.
-The example applications can be compiled using the following:
+.. note::
-.. code-block:: console
+ OCTEON TX crypto PF driver needs microcode to be available at `/lib/firmware/` directory.
+ Refer SDK documents for further information.
- cd <dpdk directory>
- export RTE_SDK=$PWD
- export RTE_TARGET=build
- cd examples/<application>
- make
+SDK and related information can be obtained from: `Cavium support site <https://support.cavium.com/>`_.
Execution
---------
Supported DPAA SoCs
--------------------
-- LS1046A
-- LS1043A
+- LS1046A/LS1026A
+- LS1043A/LS1023A
Prerequisites
-------------
-There are following pre-requisites for executing EVENTDEV on a DPAA compatible
-platform:
-
-1. **ARM 64 Tool Chain**
-
- For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/6.4-2017.08/aarch64-linux-gnu/>`_.
-
-2. **Linux Kernel**
-
- It can be obtained from `NXP's Github hosting <https://github.com/qoriq-open-source/linux>`_.
-
-3. **Rootfile System**
-
- Any *aarch64* supporting filesystem can be used. For example,
- Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
- from `here <http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
-
-As an alternative method, DPAA EVENTDEV can also be executed using images provided
-as part of SDK from NXP. The SDK includes all the above prerequisites necessary
-to bring up a DPAA board.
-
-The following dependencies are not part of DPDK and must be installed
-separately:
-
-- **NXP Linux SDK**
-
- NXP Linux software development kit (SDK) includes support for family
- of QorIQ® ARM-Architecture-based system on chip (SoC) processors
- and corresponding boards.
-
- It includes the Linux board support packages (BSPs) for NXP SoCs,
- a fully operational tool chain, kernel and board specific modules.
-
- SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
-
-- **DPDK Extra Scripts**
-
- DPAA based resources can be configured easily with the help of ready to use
- xml files as provided in the DPDK Extra repository.
-
- `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
+See :doc:`../platform/dpaa` for setup information
Currently supported by DPDK:
-- NXP SDK **2.0+** or LSDK **17.09+**
+- NXP SDK **2.0+** or LSDK **18.09+**
- Supported architectures: **arm64 LE**.
- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
Supported DPAA2 SoCs
--------------------
-- LS2080A/LS2040A
+- LX2160A
- LS2084A/LS2044A
- LS2088A/LS2048A
- LS1088A/LS1048A
Prerequisites
-------------
-There are three main pre-requisities for executing DPAA2 EVENTDEV on a DPAA2
-compatible board:
-
-1. **ARM 64 Tool Chain**
-
- For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/4.9-2017.01/aarch64-linux-gnu>`_.
-
-2. **Linux Kernel**
-
- It can be obtained from `NXP's Github hosting <https://github.com/qoriq-open-source/linux>`_.
-
-3. **Rootfile system**
-
- Any *aarch64* supporting filesystem can be used. For example,
- Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
- from `here <http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
-
-As an alternative method, DPAA2 EVENTDEV can also be executed using images provided
-as part of SDK from NXP. The SDK includes all the above prerequisites necessary
-to bring up a DPAA2 board.
-
-The following dependencies are not part of DPDK and must be installed
-separately:
-
-- **NXP Linux SDK**
-
- NXP Linux software development kit (SDK) includes support for family
- of QorIQ® ARM-Architecture-based system on chip (SoC) processors
- and corresponding boards.
-
- It includes the Linux board support packages (BSPs) for NXP SoCs,
- a fully operational tool chain, kernel and board specific modules.
-
- SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
-
-- **DPDK Extra Scripts**
-
- DPAA2 based resources can be configured easily with the help of ready scripts
- as provided in the DPDK Extra repository.
-
- `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
+See :doc:`../platform/dpaa2` for setup information
Currently supported by DPDK:
-- NXP SDK **2.0+**.
-- MC Firmware version **10.0.0** and higher.
+- NXP SDK **18.09+**.
+- MC Firmware version **10.10.0** and higher.
- Supported architectures: **arm64 LE**.
- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
Port-core binding
~~~~~~~~~~~~~~~~~
-DPAA2 EVENTDEV driver requires event port 'x' to be used on core 'x'.
+DPAA2 EVENTDEV can support only one eventport per core.
``vfio-pci`` kernel module rather than ``igb_uio`` or ``uio_pci_generic``.
For more details see :ref:`linux_gsg_binding_kernel` below.
+.. note::
+
+ If the devices used for DPDK are bound to the ``uio_pci_generic`` kernel module,
+ please make sure that the IOMMU is disabled or passthrough. One can add
+ ``intel_iommu=off`` or ``amd_iommu=off`` or ``intel_iommu=on iommu=pt``in GRUB
+ command line on x86_64 systems, or add ``iommu.passthrough=1`` on arm64 system.
+
Since DPDK release 1.7 onward provides VFIO support, use of UIO is optional
for platforms that support using VFIO.
x86_x32 ABI is currently supported with distribution packages only on Ubuntu
higher than 13.10 or recent Debian distribution. The only supported compiler is gcc 4.9+.
-* libnuma-devel - library for handling NUMA (Non Uniform Memory Access).
+* Library for handling NUMA (Non Uniform Memory Access).
+
+ * numactl-devel in Red Hat/Fedora;
+
+ * libnuma-dev in Debian/Ubuntu;
* Python, version 2.7+ or 3.2+, to use various helper scripts included in the DPDK package.
uname -r
+.. note::
+
+ Kernel version 3.2 is no longer a kernel.org longterm stable kernel.
+ For DPDK 19.02 the minimum required kernel will be updated to
+ the current kernel.org oldest longterm stable supported kernel 3.16,
+ or recent versions of common distributions, notably RHEL/CentOS 7.
+
* glibc >= 2.7 (for features related to cpuset)
The version can be checked using the ``ldd --version`` command.
For 1G pages, it is not possible to reserve the hugepage memory after the system has booted.
- On IBM POWER system, the nr_overcommit_hugepages should be set to the same value as nr_hugepages.
- For example, if the required page number is 128, the following commands are used::
-
- echo 128 > /sys/kernel/mm/hugepages/hugepages-16384kB/nr_hugepages
- echo 128 > /sys/kernel/mm/hugepages/hugepages-16384kB/nr_overcommit_hugepages
-
Using Hugepages with the DPDK
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Prerequisites
-------------
-There are three main pre-requisities for executing DPAA PMD on a DPAA
-compatible board:
+See :doc:`../platform/dpaa` for setup information
-1. **ARM 64 Tool Chain**
-
- For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/6.4-2017.08/aarch64-linux-gnu/>`_.
-
-2. **Linux Kernel**
-
- It can be obtained from `NXP's Github hosting <https://github.com/qoriq-open-source/linux>`_.
-
-3. **Rootfile system**
-
- Any *aarch64* supporting filesystem can be used. For example,
- Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
- from `here <http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
-
-4. **FMC Tool**
-
- Before any DPDK application can be executed, the Frame Manager Configuration
- Tool (FMC) need to be executed to set the configurations of the queues. This
- includes the queue state, RSS and other policies.
- This tool can be obtained from `NXP (Freescale) Public Git Repository <https://github.com/qoriq-open-source/fmc>`_.
-
- This tool needs configuration files which are available in the
- :ref:`DPDK Extra Scripts <extra_scripts>`, described below for DPDK usages.
-
-As an alternative method, DPAA PMD can also be executed using images provided
-as part of SDK from NXP. The SDK includes all the above prerequisites necessary
-to bring up a DPAA board.
-
-The following dependencies are not part of DPDK and must be installed
-separately:
-
-- **NXP Linux SDK**
-
- NXP Linux software development kit (SDK) includes support for family
- of QorIQ® ARM-Architecture-based system on chip (SoC) processors
- and corresponding boards.
-
- It includes the Linux board support packages (BSPs) for NXP SoCs,
- a fully operational tool chain, kernel and board specific modules.
-
- SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
-
-
-.. _extra_scripts:
-
-- **DPDK Extra Scripts**
-
- DPAA based resources can be configured easily with the help of ready scripts
- as provided in the DPDK Extra repository.
-
- `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
-
-Currently supported by DPDK:
-
-- NXP SDK **2.0+**.
-- Supported architectures: **arm64 LE**.
- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>`
to setup the basic DPDK environment.
Supported DPAA2 SoCs
--------------------
-
-- LS2080A/LS2040A
+- LX2160A
- LS2084A/LS2044A
- LS2088A/LS2048A
- LS1088A/LS1048A
Prerequisites
-------------
-There are three main pre-requisities for executing DPAA2 PMD on a DPAA2
-compatible board:
-
-1. **ARM 64 Tool Chain**
-
- For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/6.4-2017.08/aarch64-linux-gnu/>`_.
-
-2. **Linux Kernel**
-
- It can be obtained from `NXP's Github hosting <https://github.com/qoriq-open-source/linux>`_.
-
-3. **Rootfile system**
-
- Any *aarch64* supporting filesystem can be used. For example,
- Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
- from `here <http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
-
-As an alternative method, DPAA2 PMD can also be executed using images provided
-as part of SDK from NXP. The SDK includes all the above prerequisites necessary
-to bring up a DPAA2 board.
-
-The following dependencies are not part of DPDK and must be installed
-separately:
-
-- **NXP Linux SDK**
-
- NXP Linux software development kit (SDK) includes support for family
- of QorIQ® ARM-Architecture-based system on chip (SoC) processors
- and corresponding boards.
-
- It includes the Linux board support packages (BSPs) for NXP SoCs,
- a fully operational tool chain, kernel and board specific modules.
-
- SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
-
-- **DPDK Extra Scripts**
-
- DPAA2 based resources can be configured easily with the help of ready scripts
- as provided in the DPDK Extra repository.
-
- `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
+See :doc:`../platform/dpaa2` for setup information
Currently supported by DPDK:
-- NXP SDK **17.08+**.
-- MC Firmware version **10.3.1** and higher.
+- NXP SDK **18.09+**.
+- MC Firmware version **10.10.0** and higher.
- Supported architectures: **arm64 LE**.
- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
--- /dev/null
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2018 NXP
+
+NXP QorIQ DPAA Board Support Package
+====================================
+
+This doc has information about steps to setup QorIq dpaa
+based layerscape platform and information about common offload
+hw block drivers of **NXP QorIQ DPAA** SoC family.
+
+Supported DPAA SoCs
+--------------------
+
+* LS1046A/LS1026A
+* LS1043A/LS1023A
+
+More information about SoC can be found at `NXP Official Website
+<https://www.nxp.com/products/processors-and-microcontrollers/arm-based-
+processors-and-mcus/qoriq-layerscape-arm-processors:QORIQ-ARM>`_.
+
+
+Common Offload HW Block Drivers
+-------------------------------
+
+1. **Nics Driver**
+
+ See :doc:`../nics/dpaa` for NXP dpaa nic driver information.
+
+2. **Cryptodev Driver**
+
+ See :doc:`../cryptodevs/dpaa_sec` for NXP dpaa cryptodev driver information.
+
+3. **Eventdev Driver**
+
+ See :doc:`../eventdevs/dpaa` for NXP dpaa eventdev driver information.
+
+
+Steps To Setup Platform
+-----------------------
+
+There are four main pre-requisities for executing DPAA PMD on a DPAA
+compatible board:
+
+1. **ARM 64 Tool Chain**
+
+ For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/7.3-2018.05/aarch64-linux-gnu/gcc-linaro-7.3.1-2018.05-i686_aarch64-linux-gnu.tar.xz>`_.
+
+2. **Linux Kernel**
+
+ It can be obtained from `NXP's Github hosting <https://source.codeaurora.org/external/qoriq/qoriq-components/linux>`_.
+
+3. **Rootfile system**
+
+ Any *aarch64* supporting filesystem can be used. For example,
+ Ubuntu 16.04 LTS (Xenial) or 18.04 (Bionic) userland which can be obtained
+ from `here
+ <http://cdimage.ubuntu.com/ubuntu-base/releases/18.04/release/ubuntu-base-18.04.1-base-arm64.tar.gz>`_.
+
+4. **FMC Tool**
+
+ Before any DPDK application can be executed, the Frame Manager Configuration
+ Tool (FMC) need to be executed to set the configurations of the queues. This
+ includes the queue state, RSS and other policies.
+ This tool can be obtained from `NXP (Freescale) Public Git Repository <https://source.codeaurora.org/external/qoriq/qoriq-components/fmc>`_.
+
+ This tool needs configuration files which are available in the
+ :ref:`DPDK Extra Scripts <extra_scripts>`, described below for DPDK usages.
+
+As an alternative method, DPAA PMD can also be executed using images provided
+as part of SDK from NXP. The SDK includes all the above prerequisites necessary
+to bring up a DPAA board.
+
+The following dependencies are not part of DPDK and must be installed
+separately:
+
+- **NXP Linux SDK**
+
+ NXP Linux software development kit (SDK) includes support for family
+ of QorIQ® ARM-Architecture-based system on chip (SoC) processors
+ and corresponding boards.
+
+ It includes the Linux board support packages (BSPs) for NXP SoCs,
+ a fully operational tool chain, kernel and board specific modules.
+
+ SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
+
+
+.. _extra_scripts:
+
+- **DPDK Extra Scripts**
+
+ DPAA based resources can be configured easily with the help of ready scripts
+ as provided in the DPDK Extra repository.
+
+ `DPDK Extras Scripts <https://source.codeaurora.org/external/qoriq/qoriq-components/dpdk-extras>`_.
+
+Currently supported by DPDK:
+
+- NXP SDK **2.0+** (preferred: LSDK 18.09).
+- Supported architectures: **arm64 LE**.
+
+- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>`
+ to setup the basic DPDK environment.
--- /dev/null
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2018 NXP
+
+NXP QorIQ DPAA2 Board Support Package
+=====================================
+
+This doc has information about steps to setup NXP QoriQ DPAA2 platform
+and information about common offload hw block drivers of
+**NXP QorIQ DPAA2** SoC family.
+
+Supported DPAA2 SoCs
+--------------------
+
+- LX2160A
+- LS2084A/LS2044A
+- LS2088A/LS2048A
+- LS1088A/LS1048A
+
+More information about SoC can be found at `NXP Official Website
+<https://www.nxp.com/products/processors-and-microcontrollers/arm-based-
+processors-and-mcus/qoriq-layerscape-arm-processors:QORIQ-ARM>`_.
+
+
+Common Offload HW Block Drivers
+-------------------------------
+
+1. **Nics Driver**
+
+ See :doc:`../nics/dpaa2` for NXP dpaa2 nic driver information.
+
+2. **Cryptodev Driver**
+
+ See :doc:`../cryptodevs/dpaa2_sec` for NXP dpaa2 cryptodev driver information.
+
+3. **Eventdev Driver**
+
+ See :doc:`../eventdevs/dpaa2` for NXP dpaa2 eventdev driver information.
+
+4. **Rawdev AIOP CMDIF Driver**
+
+ See :doc:`../rawdevs/dpaa2_cmdif` for NXP dpaa2 AIOP command interface driver information.
+
+5. **Rawdev QDMA Driver**
+
+ See :doc:`../rawdevs/dpaa2_qdma` for NXP dpaa2 QDMA driver information.
+
+
+Steps To Setup Platform
+-----------------------
+
+There are four main pre-requisities for executing DPAA2 PMD on a DPAA2
+compatible board:
+
+1. **ARM 64 Tool Chain**
+
+ For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/7.3-2018.05/aarch64-linux-gnu/gcc-linaro-7.3.1-2018.05-i686_aarch64-linux-gnu.tar.xz>`_.
+
+2. **Linux Kernel**
+
+ It can be obtained from `NXP's Github hosting <https://source.codeaurora.org/external/qoriq/qoriq-components/linux>`_.
+
+3. **Rootfile system**
+
+ Any *aarch64* supporting filesystem can be used. For example,
+ Ubuntu 16.04 LTS (Xenial) or 18.04 (Bionic) userland which can be obtained
+ from `here
+ <http://cdimage.ubuntu.com/ubuntu-base/releases/18.04/release/ubuntu-base-18.04.1-base-arm64.tar.gz>`_.
+
+4. **Resource Scripts**
+
+ DPAA2 based resources can be configured easily with the help of ready scripts
+ as provided in the DPDK Extra repository.
+
+As an alternative method, DPAA2 PMD can also be executed using images provided
+as part of SDK from NXP. The SDK includes all the above prerequisites necessary
+to bring up a DPAA2 board.
+
+The following dependencies are not part of DPDK and must be installed
+separately:
+
+- **NXP Linux SDK**
+
+ NXP Linux software development kit (SDK) includes support for family
+ of QorIQ® ARM-Architecture-based system on chip (SoC) processors
+ and corresponding boards.
+
+ It includes the Linux board support packages (BSPs) for NXP SoCs,
+ a fully operational tool chain, kernel and board specific modules.
+
+ SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
+
+
+.. _extra_scripts:
+
+- **DPDK Extra Scripts**
+
+ DPAA2 based resources can be configured easily with the help of ready scripts
+ as provided in the DPDK Extra repository.
+
+ `DPDK Extras Scripts <https://source.codeaurora.org/external/qoriq/qoriq-components/dpdk-extras>`_.
+
+Currently supported by DPDK:
+
+- NXP SDK **2.0+** (preferred: LSDK 18.09).
+- MC Firmware version **10.10.0** and higher.
+- Supported architectures: **arm64 LE**.
+
+- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>`
+ to setup the basic DPDK environment.
:maxdepth: 2
:numbered:
+ dpaa
+ dpaa2
octeontx
Common Offload HW Block Drivers
-------------------------------
-1. **Eventdev Driver**
+1. **Crypto Driver**
+ See :doc:`../cryptodevs/octeontx` for octeontx crypto driver
+ information.
+
+2. **Eventdev Driver**
See :doc:`../eventdevs/octeontx` for octeontx ssovf eventdev driver
information.
-2. **Mempool Driver**
+3. **Mempool Driver**
See :doc:`../mempool/octeontx` for octeontx fpavf mempool driver
information.
Platform drivers) are available on Github at `octeontx-kmod <https://github.com/caviumnetworks/octeontx-kmod>`_
along with build, install and dpdk usage instructions.
+.. note::
+
+ The PF driver and the required microcode for the crypto offload block will be
+ available with OCTEON TX SDK only. So for using crypto offload, follow the steps
+ mentioned in :ref:`setup_platform_using_OCTEON_TX_SDK`.
+
2. **ARM64 Tool Chain**
For example, the *aarch64* Linaro Toolchain, which can be obtained from
As an alternative method, Platform drivers can also be executed using images provided
as part of SDK from Cavium. The SDK includes all the above prerequisites necessary
- to bring up a OCTEON TX board.
-
- SDK and related information can be obtained from: `Cavium support site <https://support.cavium.com/>`_.
+ to bring up a OCTEON TX board. Please refer :ref:`setup_platform_using_OCTEON_TX_SDK`.
- Follow the DPDK :doc:`../linux_gsg/index` to setup the basic DPDK environment.
+
+.. _setup_platform_using_OCTEON_TX_SDK:
+
+Setup Platform Using OCTEON TX SDK
+----------------------------------
+
+The OCTEON TX platform drivers can be compiled either natively on
+**OCTEON TX** :sup:`®` board or cross-compiled on an x86 based platform.
+
+The **OCTEON TX** :sup:`®` board must be running the linux kernel based on
+OCTEON TX SDK 6.2.0 patch 3. In this, the PF drivers for all hardware
+offload blocks are already built in.
+
+Native Compilation
+~~~~~~~~~~~~~~~~~~
+
+If the kernel and modules are cross-compiled and copied to the target board,
+some intermediate binaries required for native build would be missing on the
+target board. To make sure all the required binaries are available in the
+native architecture, the linux sources need to be compiled once natively.
+
+.. code-block:: console
+
+ cd /lib/modules/$(uname -r)/source
+ make menuconfig
+ make
+
+The above steps would rebuild the modules and the required intermediate binaries.
+Once the target is ready for native compilation, the OCTEON TX platform
+drivers can be compiled with the following steps,
+
+.. code-block:: console
+
+ cd <dpdk directory>
+ make config T=arm64-thunderx-linuxapp-gcc
+ make
+
+The example applications can be compiled using the following:
+
+.. code-block:: console
+
+ cd <dpdk directory>
+ export RTE_SDK=$PWD
+ export RTE_TARGET=build
+ cd examples/<application>
+ make
+
+Cross Compilation
+~~~~~~~~~~~~~~~~~
+
+The DPDK applications can be cross-compiled on any x86 based platform. The
+OCTEON TX SDK needs to be installed on the build system. The SDK package will
+provide the required toolchain etc.
+
+Refer to :doc:`../linux_gsg/cross_build_dpdk_for_arm64` for further steps on
+compilation. The 'host' & 'CC' to be used in the commands would change,
+in addition to the paths to which libnuma related files have to be
+copied.
+
+The following steps can be used to perform cross-compilation with OCTEON TX
+SDK 6.2.0 patch 3:
+
+.. code-block:: console
+
+ cd <sdk_install_dir>
+ source env-setup
+
+ git clone https://github.com/numactl/numactl.git
+ cd numactl
+ git checkout v2.0.11 -b v2.0.11
+ ./autogen.sh
+ autoconf -i
+ ./configure --host=aarch64-thunderx-linux CC=aarch64-thunderx-linux-gnu-gcc --prefix=<numa install dir>
+ make install
+
+The above steps will prepare the build system with NUMA additions. Now this build system can be used
+to build applications for **OCTEON TX** :sup:`®` platforms.
+
+.. code-block:: console
+
+ cd <dpdk directory>
+ export RTE_SDK=$PWD
+ export RTE_KERNELDIR=$THUNDER_ROOT/linux/kernel/linux
+ make config T=arm64-thunderx-linuxapp-gcc
+ make -j CROSS=aarch64-thunderx-linux-gnu- CONFIG_RTE_KNI_KMOD=n CONFIG_RTE_EAL_IGB_UIO=n EXTRA_CFLAGS="-isystem <numa_install_dir>/include" EXTRA_LDFLAGS="-L<numa_install_dir>/lib -lnuma"
+
+If NUMA support is not required, it can be disabled as explained in
+:doc:`../linux_gsg/cross_build_dpdk_for_arm64`.
+
+Following steps could be used in that case.
+
+.. code-block:: console
+
+ make config T=arm64-thunderx-linuxapp-gcc
+ make CROSS=aarch64-thunderx-linux-gnu-
+
+
+SDK and related information can be obtained from: `Cavium support site <https://support.cavium.com/>`_.
.. code-block:: c
rte_latencystats_uninit();
+
+Timestamp and latency calculation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Latency stats library marks the time in the timestamp field of the
+mbuf for the ingress packets and sets the ``PKT_RX_TIMESTAMP`` flag of
+``ol_flags`` for the mbuf to indicate the marked time as a valid one.
+At the egress, the mbufs with the flag set are considered having valid
+timestamp and are used for the latency calculation.
Prerequisites
-------------
-There are three main pre-requisities for executing DPAA2 CMDIF on a DPAA2
-compatible board:
-
-1. **ARM 64 Tool Chain**
-
- For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/6.3-2017.02/aarch64-linux-gnu>`_.
-
-2. **Linux Kernel**
-
- It can be obtained from `NXP's Github hosting <https://github.com/qoriq-open-source/linux>`_.
-
-3. **Rootfile system**
-
- Any *aarch64* supporting filesystem can be used. For example,
- Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
- from `here <http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
-
-As an alternative method, DPAA2 CMDIF can also be executed using images provided
-as part of SDK from NXP. The SDK includes all the above prerequisites necessary
-to bring up a DPAA2 board.
-
-The following dependencies are not part of DPDK and must be installed
-separately:
-
-- **NXP Linux SDK**
-
- NXP Linux software development kit (SDK) includes support for family
- of QorIQ® ARM-Architecture-based system on chip (SoC) processors
- and corresponding boards.
-
- It includes the Linux board support packages (BSPs) for NXP SoCs,
- a fully operational tool chain, kernel and board specific modules.
-
- SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
-
-- **DPDK Extra Scripts**
-
- DPAA2 based resources can be configured easily with the help of ready scripts
- as provided in the DPDK Extra repository.
-
- `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
+See :doc:`../platform/dpaa2` for setup information.
Currently supported by DPDK:
-- NXP SDK **2.0+**.
-- MC Firmware version **10.0.0** and higher.
+- NXP SDK **18.09+**.
+- MC Firmware version **10.10.0** and higher.
- Supported architectures: **arm64 LE**.
- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
Supported DPAA2 SoCs
--------------------
+- LX2160A
- LS2084A/LS2044A
- LS2088A/LS2048A
- LS1088A/LS1048A
Prerequisites
-------------
-There are three main pre-requisities for executing DPAA2 QDMA on a DPAA2
-compatible board:
-
-1. **ARM 64 Tool Chain**
-
- For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/6.3-2017.02/aarch64-linux-gnu>`_.
-
-2. **Linux Kernel**
-
- It can be obtained from `NXP's Github hosting <https://github.com/qoriq-open-source/linux>`_.
-
-3. **Rootfile system**
-
- Any *aarch64* supporting filesystem can be used. For example,
- Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
- from `here <http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
-
-As an alternative method, DPAA2 QDMA can also be executed using images provided
-as part of SDK from NXP. The SDK includes all the above prerequisites necessary
-to bring up a DPAA2 board.
-
-The following dependencies are not part of DPDK and must be installed
-separately:
-
-- **NXP Linux SDK**
-
- NXP Linux software development kit (SDK) includes support for family
- of QorIQ® ARM-Architecture-based system on chip (SoC) processors
- and corresponding boards.
-
- It includes the Linux board support packages (BSPs) for NXP SoCs,
- a fully operational tool chain, kernel and board specific modules.
-
- SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
-
-- **DPDK Extra Scripts**
-
- DPAA2 based resources can be configured easily with the help of ready scripts
- as provided in the DPDK Extra repository.
-
- `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
+See :doc:`../platform/dpaa2` for setup information.
Currently supported by DPDK:
-- NXP LSDK **17.12+**.
-- MC Firmware version **10.3.0** and higher.
+- NXP SDK **18.09+**.
+- MC Firmware version **10.10.0** and higher.
- Supported architectures: **arm64 LE**.
- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
their telemetry via a UNIX socket in JSON. The JSON can be consumed by any
Service Assurance agent, such as CollectD.
+* **Updated KNI kernel module, rte_kni library, and KNI sample application.**
+
+ Updated the KNI kernel module with a new kernel module parameter,
+ ``carrier=[on|off]`` to allow the user to control the default carrier
+ state of KNI kernel network interfaces. The default carrier state
+ is now set to ``off``, so the interfaces cannot be used until the
+ carrier state is set to ``on`` via ``rte_kni_update_link`` or
+ by writing ``1`` to ``/sys/devices/virtual/net/<iface>/carrier``.
+ In previous versions the default carrier state was left undefined.
+ See :doc:`../prog_guide/kernel_nic_interface` for more information.
+
+ Added the new API function ``rte_kni_update_link`` to allow the user
+ to set the carrier state of the KNI kernel network interface.
+
+ Added a new command line flag ``-m`` to the KNI sample application to
+ monitor and automatically reflect the physical NIC carrier state to the
+ KNI kernel network interface with the new ``rte_kni_update_link`` API.
+ See :doc:`../sample_app_ug/kernel_nic_interface` for more information.
+
* **Added ability to switch queue deferred start flag on testpmd app.**
Added a console command to testpmd app, giving ability to switch
* eventdev: Type of 2nd parameter to ``rte_event_eth_rx_adapter_caps_get()``
has been changed from uint8_t to uint16_t.
+* kni: By default, interface carrier status is ``off`` which means there won't
+ be any traffic. It can be set to ``on`` via ``rte_kni_update_link()`` API
+ or via ``sysfs`` interface:
+ ``echo 1 > /sys/class/net/vEth0/carrier``.
+ Note that the interface should be ``up`` to be able to read/write the sysfs interface.
+ When KNI sample application is used, ``-m`` parameter can be used to
+ automatically update the carrier status for the interface.
+
+* kni: When ethtool support enabled (``CONFIG_RTE_KNI_KMOD_ETHTOOL=y``)
+ ethtool commands ``ETHTOOL_GSET & ETHTOOL_SSET`` are no longer supported for
+ kernels that have ``ETHTOOL_GLINKSETTINGS & ETHTOOL_SLINKSETTINGS`` support.
+ This means ``ethtool "-a|--show-pause", "-s|--change"`` won't work, and
+ ``ethtool <iface>`` output will have less information.
+
ABI Changes
-----------
driver; the Linux netvsc device must be brought up before the netvsc device is
unbound and passed to the DPDK.
+* IBM Power8 is not supported by this release of DPDK. IBM Power9 is supported.
+
Tested Platforms
----------------
To compile all the sample applications
--------------------------------------
-
Set the path to DPDK source code if its not set:
.. code-block:: console
export RTE_TARGET=build
make
+
+To cross compile the sample application(s)
+------------------------------------------
+
+For cross compiling the sample application(s), please append 'CROSS=$(CROSS_COMPILER_PREFIX)' to the 'make' command.
+For example, when cross compiling for AARCH64:
+
+ .. code-block:: console
+
+ export RTE_TARGET=build
+ export RTE_SDK=/path/to/rte_sdk
+ make -C examples CROSS=aarch64-linux-gnu-
+ or
+ make CROSS=aarch64-linux-gnu-
The generate_ipv4_flow function
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The generate_ipv4_rule function is responsible for creating the flow rule.
+The generate_ipv4_flow function is responsible for creating the flow rule.
This function is located in the ``flow_blocks.c`` file.
.. code-block:: c
* The second difference is that the application differentiates between IP and non-IP traffic by means of offload flags.
-The Longest Prefix Match (LPM for IPv4, LPM6 for IPv6) table is used to store/lookup an outgoing port number, associated with that IPv4 address. Any unmatched packets are forwarded to the originating port.Compiling the Application
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+The Longest Prefix Match (LPM for IPv4, LPM6 for IPv6) table is used to store/lookup an outgoing port number,
+associated with that IPv4 address. Any unmatched packets are forwarded to the originating port.
Compiling the Application
hdr->pkt.in_port = pkt->pkt.in_port;
hdr->pkt.vlan_macip = pkt->pkt.vlan_macip;
hdr->pkt.hash = pkt->pkt.hash;
- hdr->ol_flags = pkt->ol_flags;
rte_mbuf_sanity_check(hdr, RTE_MBUF_PKT, 1);
return hdr;
The resulting binary will be ${RTE_SDK}/build/examples/guest_cli
+.. Note::
+ This sample application conditionally links in the Jansson JSON
+ library, so if you are using a multilib or cross compile environment you
+ may need to set the ``PKG_CONFIG_LIBDIR`` environmental variable to point to
+ the relevant pkgconfig folder so that the correct library is linked in.
+
+ For example, if you are building for a 32-bit target, you could find the
+ correct directory using the following ``find`` command:
+
+ .. code-block:: console
+
+ # find /usr -type d -name pkgconfig
+ /usr/lib/i386-linux-gnu/pkgconfig
+ /usr/lib/x86_64-linux-gnu/pkgconfig
+
+ Then use:
+
+ .. code-block:: console
+
+ export PKG_CONFIG_LIBDIR=/usr/lib/i386-linux-gnu/pkgconfig
+
+ You then use the make command as normal, which should find the 32-bit
+ version of the library, if it installed. If not, the application will
+ be built without the JSON interface functionality.
+
To build just the ``vm_power_manager`` application using ``meson/ninja``:
.. code-block:: console
#include <rte_vfio.h>
#include <rte_eal.h>
#include <rte_bus.h>
+#include <rte_spinlock.h>
#include "eal_filesystem.h"
}
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
+/*
+ * Spinlock for device hot-unplug failure handling.
+ * If it tries to access bus or device, such as handle sigbus on bus
+ * or handle memory failure for device, just need to use this lock.
+ * It could protect the bus and the device to avoid race condition.
+ */
+static rte_spinlock_t failure_handle_lock = RTE_SPINLOCK_INITIALIZER;
+
static void
pci_vfio_req_handler(void *param)
{
int ret;
struct rte_device *device = (struct rte_device *)param;
+ rte_spinlock_lock(&failure_handle_lock);
bus = rte_bus_find_by_device(device);
if (bus == NULL) {
RTE_LOG(ERR, EAL, "Cannot find bus for device (%s)\n",
device->name);
- return;
+ goto handle_end;
}
/*
RTE_LOG(ERR, EAL,
"Can not handle hot-unplug for device (%s)\n",
device->name);
+handle_end:
+ rte_spinlock_unlock(&failure_handle_lock);
}
/* enable notifier (only enable req now) */
uint8_t *addr_direction)
{
uint8_t found = 0;
+ uint32_t pos;
+ uint8_t last_byte;
while (!found && counter_num_bytes > 0) {
counter_num_bytes--;
if (src[counter_num_bytes] == 0x00)
continue;
- if (src[counter_num_bytes] == 0x80) {
- *addr_direction = src[counter_num_bytes - 1] & 0x1;
- *addr_length_in_bits = counter_num_bytes * 8 - 1;
- found = 1;
- } else {
- int i = 0;
- uint8_t last_byte = src[counter_num_bytes];
- for (i = 0; i < 8 && found == 0; i++) {
- if (last_byte & (1 << i)) {
- *addr_direction = (last_byte >> (i+1))
- & 0x1;
- if (i != 6)
- *addr_length_in_bits =
- counter_num_bytes * 8
- + (8 - (i + 2));
- else
- *addr_length_in_bits =
- counter_num_bytes * 8;
- found = 1;
- }
- }
+ pos = rte_bsf32(src[counter_num_bytes]);
+ if (pos == 7) {
+ if (likely(counter_num_bytes > 0)) {
+ last_byte = src[counter_num_bytes - 1];
+ *addr_direction = last_byte & 0x1;
+ *addr_length_in_bits = counter_num_bytes * 8
+ - 1;
}
+ } else {
+ last_byte = src[counter_num_bytes];
+ *addr_direction = (last_byte >> (pos + 1)) & 0x1;
+ *addr_length_in_bits = counter_num_bytes * 8
+ + (8 - (pos + 2));
+ }
+ found = 1;
}
}
PKT_TX_TCP_SEG)
#define AVF_TX_OFFLOAD_MASK ( \
+ PKT_TX_OUTER_IPV6 | \
+ PKT_TX_OUTER_IPV4 | \
+ PKT_TX_IPV6 | \
+ PKT_TX_IPV4 | \
PKT_TX_VLAN_PKT | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
"Failed to parse agg selection mode for bonded device %s",
name);
}
- if (internals->mode == BONDING_MODE_8023AD)
- rte_eth_bond_8023ad_agg_selection_set(port_id,
- agg_mode);
+ if (internals->mode == BONDING_MODE_8023AD) {
+ int ret = rte_eth_bond_8023ad_agg_selection_set(port_id,
+ agg_mode);
+ if (ret < 0) {
+ RTE_BOND_LOG(ERR,
+ "Invalid args for agg selection set for bonded device %s",
+ name);
+ return -1;
+ }
+ }
}
/* Parse/add slave ports to bonded device */
{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
0, 0, 0, 0, 0 };
uint16_t ena_qid;
+ unsigned int i;
int rc;
adapter = ring->adapter;
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
ctx.queue_size = adapter->tx_ring_size;
+ for (i = 0; i < ring->ring_size; i++)
+ ring->empty_tx_reqs[i] = i;
} else {
ena_qid = ENA_IO_RXQ_IDX(ring->id);
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
ctx.queue_size = adapter->rx_ring_size;
+ for (i = 0; i < ring->ring_size; i++)
+ ring->empty_rx_reqs[i] = i;
}
ctx.qid = ena_qid;
ctx.msix_vector = -1; /* interrupts not used */
for (i = 0; i < nb_txq; ++i) {
ena_qid = ENA_IO_TXQ_IDX(i);
ena_com_destroy_io_queue(ena_dev, ena_qid);
+
+ ena_tx_queue_release_bufs(&adapter->tx_ring[i]);
}
for (i = 0; i < nb_rxq; ++i) {
return 0;
}
+/**
+ * This function is used to check if the register is valid.
+ * Below is the valid registers list for X722 only:
+ * 0x2b800--0x2bb00
+ * 0x38700--0x38a00
+ * 0x3d800--0x3db00
+ * 0x208e00--0x209000
+ * 0x20be00--0x20c000
+ * 0x263c00--0x264000
+ * 0x265c00--0x266000
+ */
+static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
+{
+ if ((type != I40E_MAC_X722) &&
+ ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
+ (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
+ (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
+ (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
+ (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
+ (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
+ (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
+ return 0;
+ else
+ return 1;
+}
+
static int i40e_get_regs(struct rte_eth_dev *dev,
struct rte_dev_reg_info *regs)
{
reg_offset = arr_idx * reg_info->stride1 +
arr_idx2 * reg_info->stride2;
reg_offset += reg_info->base_addr;
- ptr_data[reg_offset >> 2] =
- I40E_READ_REG(hw, reg_offset);
+ if (!i40e_valid_regs(hw->mac.type, reg_offset))
+ ptr_data[reg_offset >> 2] = 0;
+ else
+ ptr_data[reg_offset >> 2] =
+ I40E_READ_REG(hw, reg_offset);
}
}
return -EINVAL;
if (!in->key && in->key_len)
return -EINVAL;
- if (in->key)
- out->conf.key = memcpy(out->key, in->key, in->key_len);
out->conf = (struct rte_flow_action_rss){
.func = in->func,
.level = in->level,
.queue = memcpy(out->queue, in->queue,
sizeof(*in->queue) * in->queue_num),
};
+ if (in->key)
+ out->conf.key = memcpy(out->key, in->key, in->key_len);
return 0;
}
ixgbe_flap_tx_laser(hw);
/* Wait for the controller to acquire link. Per IEEE 802.3ap,
- * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * Section 73.10.2, we may have to wait up to 1000ms if KR is
* attempted. 82599 uses the same timing for 10g SFI.
*/
for (i = 0; i < 10; i++) {
DEBUG("port %u inserting MR(%p) to global cache",
dev->data->port_id, (void *)mr);
for (n = 0; n < mr->ms_bmp_n; ) {
- struct mlx4_mr_cache entry = { 0, };
+ struct mlx4_mr_cache entry;
+ memset(&entry, 0, sizeof(entry));
/* Find a contiguous chunk and advance the index. */
n = mr_find_next_chunk(mr, &entry, n);
if (!entry.end)
if (mr->ms_n == 0)
continue;
for (n = 0; n < mr->ms_bmp_n; ) {
- struct mlx4_mr_cache ret = { 0, };
+ struct mlx4_mr_cache ret;
+ memset(&ret, 0, sizeof(ret));
n = mr_find_next_chunk(mr, &ret, n);
if (addr >= ret.start && addr < ret.end) {
/* Found. */
* Find out a contiguous virtual address chunk in use, to which the
* given address belongs, in order to register maximum range. In the
* best case where mempools are not dynamically recreated and
- * '--socket-mem' is speicified as an EAL option, it is very likely to
+ * '--socket-mem' is specified as an EAL option, it is very likely to
* have only one MR(LKey) per a socket and per a hugepage-size even
* though the system memory is highly fragmented.
*/
*/
for (n = 0; n < ms_n; ++n) {
uintptr_t start;
- struct mlx4_mr_cache ret = { 0, };
+ struct mlx4_mr_cache ret;
+ memset(&ret, 0, sizeof(ret));
start = data_re.start + n * msl->page_sz;
/* Exclude memsegs already registered by other MRs. */
if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
* @return
* Searched LKey on success, UINT32_MAX on no match.
*/
-uint32_t
+static uint32_t
mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr)
{
struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
}
+/**
+ * Bottom-half of LKey search on Tx. If it can't be searched in the memseg
+ * list, register the mempool of the mbuf as externally allocated memory.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param mb
+ * Pointer to mbuf.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx4_tx_mb2mr_bh(struct txq *txq, struct rte_mbuf *mb)
+{
+ uintptr_t addr = (uintptr_t)mb->buf_addr;
+ uint32_t lkey;
+
+ lkey = mlx4_tx_addr2mr_bh(txq, addr);
+ if (lkey == UINT32_MAX && rte_errno == ENXIO) {
+ /* Mempool may have externally allocated memory. */
+ return mlx4_tx_update_ext_mp(txq, addr, mlx4_mb2mp(mb));
+ }
+ return lkey;
+}
+
/**
* Flush all of the local cache entries.
*
if (mr->ms_n == 0)
continue;
for (n = 0; n < mr->ms_bmp_n; ) {
- struct mlx4_mr_cache ret = { 0, };
+ struct mlx4_mr_cache ret;
+ memset(&ret, 0, sizeof(ret));
n = mr_find_next_chunk(mr, &ret, n);
if (!ret.end)
break;
void mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl);
uint32_t mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr);
-uint32_t mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr);
+uint32_t mlx4_tx_mb2mr_bh(struct txq *txq, struct rte_mbuf *mb);
uint32_t mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr,
struct rte_mempool *mp);
* @return
* Memory pool where data is located for given mbuf.
*/
-static struct rte_mempool *
+static inline struct rte_mempool *
mlx4_mb2mp(struct rte_mbuf *buf)
{
if (unlikely(RTE_MBUF_INDIRECT(buf)))
* Searched LKey on success, UINT32_MAX on no match.
*/
static __rte_always_inline uint32_t
-mlx4_tx_addr2mr(struct txq *txq, uintptr_t addr)
+mlx4_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
{
struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ uintptr_t addr = (uintptr_t)mb->buf_addr;
uint32_t lkey;
/* Check generation bit to see if there's any change on existing MRs. */
MLX4_MR_CACHE_N, addr);
if (likely(lkey != UINT32_MAX))
return lkey;
- /* Take slower bottom-half (binary search) on miss. */
- return mlx4_tx_addr2mr_bh(txq, addr);
-}
-
-static __rte_always_inline uint32_t
-mlx4_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
-{
- uintptr_t addr = (uintptr_t)mb->buf_addr;
- uint32_t lkey = mlx4_tx_addr2mr(txq, addr);
-
- if (likely(lkey != UINT32_MAX))
- return lkey;
- if (rte_errno == ENXIO) {
- /* Mempool may have externally allocated memory. */
- lkey = mlx4_tx_update_ext_mp(txq, addr, mlx4_mb2mp(mb));
- }
- return lkey;
+ /* Take slower bottom-half on miss. */
+ return mlx4_tx_mb2mr_bh(txq, mb);
}
#endif /* MLX4_RXTX_H_ */
},
{
.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
- .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
+ .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
},
{
.tunnel = MLX5_FLOW_LAYER_MPLS,
/**
* Validate MPLS item.
*
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
* @param[in] item
* Item specification.
* @param[in] item_flags
* Bit-fields that holds the items detected until now.
- * @param[in] target_protocol
- * The next protocol in the previous item.
+ * @param[in] prev_layer
+ * The protocol layer indicated in previous item.
* @param[out] error
* Pointer to error structure.
*
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_flow_validate_item_mpls(const struct rte_flow_item *item __rte_unused,
+mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_item *item __rte_unused,
uint64_t item_flags __rte_unused,
- uint8_t target_protocol __rte_unused,
+ uint64_t prev_layer __rte_unused,
struct rte_flow_error *error)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
const struct rte_flow_item_mpls *mask = item->mask;
+ struct priv *priv = dev->data->dev_private;
int ret;
- if (target_protocol != 0xff && target_protocol != IPPROTO_MPLS)
+ if (!priv->config.mpls_en)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "MPLS not supported or"
+ " disabled in firmware"
+ " configuration.");
+ /* MPLS over IP, UDP, GRE is allowed */
+ if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
+ MLX5_FLOW_LAYER_OUTER_L4_UDP |
+ MLX5_FLOW_LAYER_GRE)))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"protocol filtering not compatible"
flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
struct rte_flow *flow)
{
- flow_drv_destroy(dev, flow);
- TAILQ_REMOVE(list, flow, next);
/*
* Update RX queue flags only if port is started, otherwise it is
* already clean.
*/
if (dev->data->dev_started)
flow_rxq_flags_trim(dev, flow);
+ flow_drv_destroy(dev, flow);
+ TAILQ_REMOVE(list, flow, next);
rte_free(flow->fdir);
rte_free(flow);
}
#define IPPROTO_MPLS 137
#endif
+/* UDP port number for MPLS */
+#define MLX5_UDP_PORT_MPLS 6635
+
/* UDP port numbers for VxLAN. */
#define MLX5_UDP_PORT_VXLAN 4789
#define MLX5_UDP_PORT_VXLAN_GPE 4790
struct mlx5_flow_tcf {
struct nlmsghdr *nlh;
struct tcmsg *tcm;
+ uint32_t *ptc_flags; /**< tc rule applied flags. */
union { /**< Tunnel encap/decap descriptor. */
struct flow_tcf_tunnel_hdr *tunnel;
struct flow_tcf_vxlan_decap *vxlan_decap;
int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
uint64_t item_flags,
struct rte_flow_error *error);
-int mlx5_flow_validate_item_mpls(const struct rte_flow_item *item,
+int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
uint64_t item_flags,
- uint8_t target_protocol,
+ uint64_t prev_layer,
struct rte_flow_error *error);
int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
uint64_t item_flags,
int ret;
uint64_t action_flags = 0;
uint64_t item_flags = 0;
+ uint64_t last_item = 0;
int tunnel = 0;
uint8_t next_protocol = 0xff;
int actions_n = 0;
error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
ret = mlx5_flow_validate_item_vlan(items, item_flags,
error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
- MLX5_FLOW_LAYER_OUTER_VLAN;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
if (items->mask != NULL &&
((const struct rte_flow_item_ipv4 *)
items->mask)->hdr.next_proto_id) {
error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
if (items->mask != NULL &&
((const struct rte_flow_item_ipv6 *)
items->mask)->hdr.proto) {
error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
- MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
break;
case RTE_FLOW_ITEM_TYPE_UDP:
ret = mlx5_flow_validate_item_udp(items, item_flags,
error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
- MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
case RTE_FLOW_ITEM_TYPE_GRE:
case RTE_FLOW_ITEM_TYPE_NVGRE:
next_protocol, error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_GRE;
+ last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
ret = mlx5_flow_validate_item_vxlan(items, item_flags,
error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_VXLAN;
+ last_item = MLX5_FLOW_LAYER_VXLAN;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
ret = mlx5_flow_validate_item_vxlan_gpe(items,
error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+ last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ ret = mlx5_flow_validate_item_mpls(dev, items,
+ item_flags,
+ last_item, error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_MPLS;
break;
case RTE_FLOW_ITEM_TYPE_META:
ret = flow_dv_validate_item_meta(dev, items, attr,
error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_ITEM_METADATA;
+ last_item = MLX5_FLOW_ITEM_METADATA;
break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "item not supported");
}
+ item_flags |= last_item;
}
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}
+/**
+ * Add MPLS item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] prev_layer
+ * The protocol layer indicated in previous item.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_mpls(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ uint64_t prev_layer,
+ int inner)
+{
+ const uint32_t *in_mpls_m = item->mask;
+ const uint32_t *in_mpls_v = item->spec;
+ uint32_t *out_mpls_m = 0;
+ uint32_t *out_mpls_v = 0;
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ misc_parameters_2);
+ void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
+ void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+
+ switch (prev_layer) {
+ case MLX5_FLOW_LAYER_OUTER_L4_UDP:
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ MLX5_UDP_PORT_MPLS);
+ break;
+ case MLX5_FLOW_LAYER_GRE:
+ MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+ ETHER_TYPE_MPLS);
+ break;
+ default:
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ IPPROTO_MPLS);
+ break;
+ }
+ if (!in_mpls_v)
+ return;
+ if (!in_mpls_m)
+ in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
+ switch (prev_layer) {
+ case MLX5_FLOW_LAYER_OUTER_L4_UDP:
+ out_mpls_m =
+ (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
+ outer_first_mpls_over_udp);
+ out_mpls_v =
+ (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
+ outer_first_mpls_over_udp);
+ break;
+ case MLX5_FLOW_LAYER_GRE:
+ out_mpls_m =
+ (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
+ outer_first_mpls_over_gre);
+ out_mpls_v =
+ (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
+ outer_first_mpls_over_gre);
+ break;
+ default:
+ /* Inner MPLS not over GRE is not supported. */
+ if (!inner) {
+ out_mpls_m =
+ (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
+ misc2_m,
+ outer_first_mpls);
+ out_mpls_v =
+ (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
+ misc2_v,
+ outer_first_mpls);
+ }
+ break;
+ }
+ if (out_mpls_m && out_mpls_v) {
+ *out_mpls_m = *in_mpls_m;
+ *out_mpls_v = *in_mpls_v & *in_mpls_m;
+ }
+}
+
/**
* Add META item to matcher
*
struct priv *priv = dev->data->dev_private;
struct rte_flow *flow = dev_flow->flow;
uint64_t item_flags = 0;
+ uint64_t last_item = 0;
uint64_t action_flags = 0;
uint64_t priority = attr->priority;
struct mlx5_flow_dv_matcher matcher = {
flow_dv_translate_item_eth(match_mask, match_value,
items, tunnel);
matcher.priority = MLX5_PRIORITY_MAP_L2;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
flow_dv_translate_item_vlan(match_mask, match_value,
items, tunnel);
matcher.priority = MLX5_PRIORITY_MAP_L2;
- item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
- MLX5_FLOW_LAYER_INNER_VLAN) :
- (MLX5_FLOW_LAYER_OUTER_L2 |
- MLX5_FLOW_LAYER_OUTER_VLAN);
+ last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
+ MLX5_FLOW_LAYER_INNER_VLAN) :
+ (MLX5_FLOW_LAYER_OUTER_L2 |
+ MLX5_FLOW_LAYER_OUTER_VLAN);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
flow_dv_translate_item_ipv4(match_mask, match_value,
(dev_flow, tunnel,
MLX5_IPV4_LAYER_TYPES,
MLX5_IPV4_IBV_RX_HASH);
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
flow_dv_translate_item_ipv6(match_mask, match_value,
(dev_flow, tunnel,
MLX5_IPV6_LAYER_TYPES,
MLX5_IPV6_IBV_RX_HASH);
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
break;
case RTE_FLOW_ITEM_TYPE_TCP:
flow_dv_translate_item_tcp(match_mask, match_value,
(dev_flow, tunnel, ETH_RSS_TCP,
IBV_RX_HASH_SRC_PORT_TCP |
IBV_RX_HASH_DST_PORT_TCP);
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
- MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
break;
case RTE_FLOW_ITEM_TYPE_UDP:
flow_dv_translate_item_udp(match_mask, match_value,
(dev_flow, tunnel, ETH_RSS_UDP,
IBV_RX_HASH_SRC_PORT_UDP |
IBV_RX_HASH_DST_PORT_UDP);
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
- MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
case RTE_FLOW_ITEM_TYPE_GRE:
flow_dv_translate_item_gre(match_mask, match_value,
items, tunnel);
- item_flags |= MLX5_FLOW_LAYER_GRE;
+ last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
flow_dv_translate_item_nvgre(match_mask, match_value,
items, tunnel);
- item_flags |= MLX5_FLOW_LAYER_GRE;
+ last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
flow_dv_translate_item_vxlan(match_mask, match_value,
items, tunnel);
- item_flags |= MLX5_FLOW_LAYER_VXLAN;
+ last_item = MLX5_FLOW_LAYER_VXLAN;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
flow_dv_translate_item_vxlan(match_mask, match_value,
items, tunnel);
- item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+ last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ flow_dv_translate_item_mpls(match_mask, match_value,
+ items, last_item, tunnel);
+ last_item = MLX5_FLOW_LAYER_MPLS;
break;
case RTE_FLOW_ITEM_TYPE_META:
flow_dv_translate_item_meta(match_mask, match_value,
items);
- item_flags |= MLX5_FLOW_ITEM_METADATA;
+ last_item = MLX5_FLOW_ITEM_METADATA;
break;
default:
break;
}
+ item_flags |= last_item;
}
assert(!flow_dv_check_valid_spec(matcher.mask.buf,
dev_flow->dv.value.buf));
void *data __rte_unused,
struct rte_flow_error *error __rte_unused)
{
- rte_errno = ENOTSUP;
- return -rte_errno;
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "flow query with DV is not supported");
}
#ifndef TCA_CLS_FLAGS_SKIP_SW
#define TCA_CLS_FLAGS_SKIP_SW (1 << 1)
#endif
+#ifndef TCA_CLS_FLAGS_IN_HW
+#define TCA_CLS_FLAGS_IN_HW (1 << 2)
+#endif
#ifndef HAVE_TCA_CHAIN
#define TCA_CHAIN 11
#endif
assert(na_flower);
assert(na_flower_act);
mnl_attr_nest_end(nlh, na_flower_act);
+ dev_flow->tcf.ptc_flags = mnl_attr_get_payload
+ (mnl_nlmsg_get_payload_tail(nlh));
mnl_attr_put_u32(nlh, TCA_FLOWER_FLAGS, decap.vxlan ?
0 : TCA_CLS_FLAGS_SKIP_SW);
mnl_attr_nest_end(nlh, na_flower);
* @param nlh
* Message to send. This function always raises the NLM_F_ACK flag before
* sending.
- * @param[in] msglen
- * Message length. Message buffer may contain multiple commands and
- * nlmsg_len field not always corresponds to actual message length.
- * If 0 specified the nlmsg_len field in header is used as message length.
* @param[in] cb
* Callback handler for received message.
* @param[in] arg
static int
flow_tcf_nl_ack(struct mlx5_flow_tcf_context *tcf,
struct nlmsghdr *nlh,
- uint32_t msglen,
mnl_cb_t cb, void *arg)
{
unsigned int portid = mnl_socket_get_portid(tcf->nl);
uint32_t seq = tcf->seq++;
- int err, ret;
+ int ret, err = 0;
assert(tcf->nl);
assert(tcf->buf);
- if (!seq)
+ if (!seq) {
/* seq 0 is reserved for kernel event-driven notifications. */
seq = tcf->seq++;
+ }
nlh->nlmsg_seq = seq;
- if (!msglen) {
- msglen = nlh->nlmsg_len;
- nlh->nlmsg_flags |= NLM_F_ACK;
+ nlh->nlmsg_flags |= NLM_F_ACK;
+ ret = mnl_socket_sendto(tcf->nl, nlh, nlh->nlmsg_len);
+ if (ret <= 0) {
+ /* Message send error occurred. */
+ rte_errno = errno;
+ return -rte_errno;
}
- ret = mnl_socket_sendto(tcf->nl, nlh, msglen);
- err = (ret <= 0) ? errno : 0;
nlh = (struct nlmsghdr *)(tcf->buf);
/*
* The following loop postpones non-fatal errors until multipart
* messages are complete.
*/
- if (ret > 0)
- while (true) {
- ret = mnl_socket_recvfrom(tcf->nl, tcf->buf,
- tcf->buf_size);
+ while (true) {
+ ret = mnl_socket_recvfrom(tcf->nl, tcf->buf, tcf->buf_size);
+ if (ret < 0) {
+ err = errno;
+ /*
+ * In case of overflow we will keep receiving till the
+ * end of the multipart message. We may lose part of the
+ * reply messages but still mark and return an error.
+ */
+ if (err != ENOSPC ||
+ !(nlh->nlmsg_flags & NLM_F_MULTI) ||
+ nlh->nlmsg_type == NLMSG_DONE)
+ break;
+ } else {
+ ret = mnl_cb_run(nlh, ret, seq, portid, cb, arg);
+ if (!ret) {
+ /*
+ * libmnl returns 0 if DONE or
+ * success ACK message found.
+ */
+ break;
+ }
if (ret < 0) {
+ /*
+ * ACK message with error found
+ * or some error occurred.
+ */
err = errno;
- if (err != ENOSPC)
- break;
- }
- if (!err) {
- ret = mnl_cb_run(nlh, ret, seq, portid,
- cb, arg);
- if (ret < 0) {
- err = errno;
- break;
- }
- }
- /* Will receive till end of multipart message */
- if (!(nlh->nlmsg_flags & NLM_F_MULTI) ||
- nlh->nlmsg_type == NLMSG_DONE)
break;
+ }
+ /* We should continue receiving. */
}
+ }
if (!err)
return 0;
rte_errno = err;
nlh = (struct nlmsghdr *)&bc->msg[msg];
assert((bc->size - msg) >= nlh->nlmsg_len);
msg += nlh->nlmsg_len;
- rc = flow_tcf_nl_ack(tcf, nlh, 0, NULL, NULL);
+ rc = flow_tcf_nl_ack(tcf, nlh, NULL, NULL);
if (rc) {
DRV_LOG(WARNING,
"netlink: cleanup error %d", rc);
ifa->ifa_family = AF_UNSPEC;
ifa->ifa_index = ifindex;
ifa->ifa_scope = RT_SCOPE_LINK;
- ret = flow_tcf_nl_ack(tcf, nlh, 0, flow_tcf_collect_local_cb, &ctx);
+ ret = flow_tcf_nl_ack(tcf, nlh, flow_tcf_collect_local_cb, &ctx);
if (ret)
DRV_LOG(WARNING, "netlink: query device list error %d", ret);
ret = flow_tcf_send_nlcmd(tcf, &ctx);
ndm->ndm_family = AF_UNSPEC;
ndm->ndm_ifindex = ifindex;
ndm->ndm_state = NUD_PERMANENT;
- ret = flow_tcf_nl_ack(tcf, nlh, 0, flow_tcf_collect_neigh_cb, &ctx);
+ ret = flow_tcf_nl_ack(tcf, nlh, flow_tcf_collect_neigh_cb, &ctx);
if (ret)
DRV_LOG(WARNING, "netlink: query device list error %d", ret);
ret = flow_tcf_send_nlcmd(tcf, &ctx);
nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
ifm->ifi_family = AF_UNSPEC;
- ret = flow_tcf_nl_ack(tcf, nlh, 0, flow_tcf_collect_vxlan_cb, &ctx);
+ ret = flow_tcf_nl_ack(tcf, nlh, flow_tcf_collect_vxlan_cb, &ctx);
if (ret)
DRV_LOG(WARNING, "netlink: query device list error %d", ret);
ret = flow_tcf_send_nlcmd(tcf, &ctx);
sizeof(encap->ipv6.dst),
&encap->ipv6.dst);
}
- if (!flow_tcf_nl_ack(tcf, nlh, 0, NULL, NULL))
+ if (!flow_tcf_nl_ack(tcf, nlh, NULL, NULL))
return 0;
return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
if (encap->mask & FLOW_TCF_ENCAP_ETH_DST)
mnl_attr_put(nlh, NDA_LLADDR, sizeof(encap->eth.dst),
&encap->eth.dst);
- if (!flow_tcf_nl_ack(tcf, nlh, 0, NULL, NULL))
+ if (!flow_tcf_nl_ack(tcf, nlh, NULL, NULL))
return 0;
return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
ifm->ifi_family = AF_UNSPEC;
ifm->ifi_index = vtep->ifindex;
assert(sizeof(buf) >= nlh->nlmsg_len);
- ret = flow_tcf_nl_ack(tcf, nlh, 0, NULL, NULL);
+ ret = flow_tcf_nl_ack(tcf, nlh, NULL, NULL);
if (ret)
DRV_LOG(WARNING, "netlink: error deleting vxlan"
" encap/decap ifindex %u",
mnl_attr_nest_end(nlh, na_vxlan);
mnl_attr_nest_end(nlh, na_info);
assert(sizeof(buf) >= nlh->nlmsg_len);
- ret = flow_tcf_nl_ack(tcf, nlh, 0, NULL, NULL);
+ ret = flow_tcf_nl_ack(tcf, nlh, NULL, NULL);
if (ret) {
DRV_LOG(WARNING,
"netlink: VTEP %s create failure (%d)",
ifm->ifi_index = vtep->ifindex;
ifm->ifi_flags = IFF_UP;
ifm->ifi_change = IFF_UP;
- ret = flow_tcf_nl_ack(tcf, nlh, 0, NULL, NULL);
+ ret = flow_tcf_nl_ack(tcf, nlh, NULL, NULL);
if (ret) {
rte_flow_error_set(error, -errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
pthread_mutex_unlock(&vtep_list_mutex);
}
+/* Context for a TC rule attributes query (see flow_tcf_collect_query_cb()). */
+struct tcf_nlcb_query {
+ uint32_t handle; /* TC rule handle to match in reply messages. */
+ uint32_t tc_flags; /* Collected TCA_FLOWER_FLAGS value. */
+ uint32_t flags_valid:1; /* Set once tc_flags has been filled in. */
+};
+
+/**
+ * Collect queried rule attributes. This is a callback routine called by
+ * libmnl mnl_cb_run() in a loop for every message in the received packet.
+ * The current implementation collects the flower flags only.
+ *
+ * @param[in] nlh
+ * Pointer to reply header.
+ * @param[in, out] arg
+ * Context pointer for this callback, points to a struct tcf_nlcb_query.
+ *
+ * @return
+ * A positive, nonzero value on success (required by libmnl
+ * to continue message processing).
+ */
+static int
+flow_tcf_collect_query_cb(const struct nlmsghdr *nlh, void *arg)
+{
+ struct tcf_nlcb_query *query = arg;
+ struct tcmsg *tcm = mnl_nlmsg_get_payload(nlh);
+ struct nlattr *na, *na_opt;
+ bool flower = false;
+
+ /* Skip messages that are not for the queried rule handle. */
+ if (nlh->nlmsg_type != RTM_NEWTFILTER ||
+ tcm->tcm_handle != query->handle)
+ return 1;
+ mnl_attr_for_each(na, nlh, sizeof(*tcm)) {
+ switch (mnl_attr_get_type(na)) {
+ case TCA_KIND:
+ if (strcmp(mnl_attr_get_payload(na), "flower")) {
+ /* Not flower filter, drop entire message. */
+ return 1;
+ }
+ flower = true;
+ break;
+ case TCA_OPTIONS:
+ if (!flower) {
+ /* Not flower options, drop entire message. */
+ return 1;
+ }
+ /* Check nested flower options. */
+ mnl_attr_for_each_nested(na_opt, na) {
+ switch (mnl_attr_get_type(na_opt)) {
+ case TCA_FLOWER_FLAGS:
+ /* Flags found - store and mark valid. */
+ query->flags_valid = 1;
+ query->tc_flags =
+ mnl_attr_get_u32(na_opt);
+ break;
+ }
+ }
+ break;
+ }
+ }
+ return 1;
+}
+
+/**
+ * Query the TC flower rule flags via netlink.
+ *
+ * @param[in] tcf
+ * Context object initialized by mlx5_flow_tcf_context_create().
+ * @param[in] dev_flow
+ * Pointer to the flow.
+ * @param[out] pflags
+ * Pointer to the flags retrieved by the query (TCA_FLOWER_FLAGS value,
+ * set to 0 if the option was not found).
+ *
+ * @return
+ * 0 on success, -ENOENT if no flags option was found in the reply.
+ */
+static int
+flow_tcf_query_flags(struct mlx5_flow_tcf_context *tcf,
+ struct mlx5_flow *dev_flow,
+ uint32_t *pflags)
+{
+ struct nlmsghdr *nlh;
+ struct tcmsg *tcm;
+ struct tcf_nlcb_query query = {
+ .handle = dev_flow->tcf.tcm->tcm_handle,
+ };
+
+ /* Build an RTM_GETTFILTER request from the stored rule tcmsg. */
+ nlh = mnl_nlmsg_put_header(tcf->buf);
+ nlh->nlmsg_type = RTM_GETTFILTER;
+ nlh->nlmsg_flags = NLM_F_REQUEST;
+ tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
+ memcpy(tcm, dev_flow->tcf.tcm, sizeof(*tcm));
+ /*
+ * Ignore Netlink error for filter query operations.
+ * The reply length is sent by kernel as errno.
+ * Just check we got the flags option.
+ */
+ flow_tcf_nl_ack(tcf, nlh, flow_tcf_collect_query_cb, &query);
+ if (!query.flags_valid) {
+ *pflags = 0;
+ return -ENOENT;
+ }
+ *pflags = query.tc_flags;
+ return 0;
+}
+
+/**
+ * Query and check whether the in_hw flag is set for the specified rule.
+ *
+ * @param[in] tcf
+ * Context object initialized by mlx5_flow_tcf_context_create().
+ * @param[in] dev_flow
+ * Pointer to the flow to check.
+ *
+ * @return
+ * 0 if the in_hw flag is set, a negative errno value otherwise
+ * (-ENOENT if the flag is absent, or the error from the flags query).
+ */
+static int
+flow_tcf_check_inhw(struct mlx5_flow_tcf_context *tcf,
+ struct mlx5_flow *dev_flow)
+{
+ uint32_t flags;
+ int ret;
+
+ ret = flow_tcf_query_flags(tcf, dev_flow, &flags);
+ if (ret)
+ return ret;
+ return (flags & TCA_CLS_FLAGS_IN_HW) ? 0 : -ENOENT;
+}
+
+/**
+ * Remove flow from E-Switch by sending Netlink message.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in, out] flow
+ * Pointer to the sub flow.
+ */
+static void
+flow_tcf_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
+ struct mlx5_flow *dev_flow;
+ struct nlmsghdr *nlh;
+
+ if (!flow)
+ return;
+ dev_flow = LIST_FIRST(&flow->dev_flows);
+ if (!dev_flow)
+ return;
+ /* E-Switch flow can't be expanded. */
+ assert(!LIST_NEXT(dev_flow, next));
+ if (dev_flow->tcf.applied) {
+ /* Reuse the stored rule message as a delete request. */
+ nlh = dev_flow->tcf.nlh;
+ nlh->nlmsg_type = RTM_DELTFILTER;
+ nlh->nlmsg_flags = NLM_F_REQUEST;
+ flow_tcf_nl_ack(ctx, nlh, NULL, NULL);
+ /* Release the VTEP used by the tunnel rule, if any. */
+ if (dev_flow->tcf.tunnel) {
+ assert(dev_flow->tcf.tunnel->vtep);
+ flow_tcf_vtep_release(ctx,
+ dev_flow->tcf.tunnel->vtep,
+ dev_flow);
+ dev_flow->tcf.tunnel->vtep = NULL;
+ }
+ dev_flow->tcf.applied = 0;
+ }
+}
/**
* Apply flow to E-Switch by sending Netlink message.
*dev_flow->tcf.tunnel->ifindex_ptr =
dev_flow->tcf.tunnel->vtep->ifindex;
}
- if (!flow_tcf_nl_ack(ctx, nlh, 0, NULL, NULL)) {
+ if (!flow_tcf_nl_ack(ctx, nlh, NULL, NULL)) {
dev_flow->tcf.applied = 1;
+ if (*dev_flow->tcf.ptc_flags & TCA_CLS_FLAGS_SKIP_SW)
+ return 0;
+ /*
+ * Rule was applied without the skip_sw flag set.
+ * We should check whether the rule was actually
+ * accepted by hardware (have a look at the in_hw flag).
+ */
+ if (flow_tcf_check_inhw(ctx, dev_flow)) {
+ flow_tcf_remove(dev, flow);
+ return rte_flow_error_set
+ (error, ENOENT,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "netlink: rule has no in_hw flag set");
+ }
return 0;
}
if (dev_flow->tcf.tunnel) {
"netlink: failed to create TC flow rule");
}
-/**
- * Remove flow from E-Switch by sending Netlink message.
- *
- * @param[in] dev
- * Pointer to Ethernet device.
- * @param[in, out] flow
- * Pointer to the sub flow.
- */
-static void
-flow_tcf_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
-{
- struct priv *priv = dev->data->dev_private;
- struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
- struct mlx5_flow *dev_flow;
- struct nlmsghdr *nlh;
-
- if (!flow)
- return;
- dev_flow = LIST_FIRST(&flow->dev_flows);
- if (!dev_flow)
- return;
- /* E-Switch flow can't be expanded. */
- assert(!LIST_NEXT(dev_flow, next));
- if (dev_flow->tcf.applied) {
- nlh = dev_flow->tcf.nlh;
- nlh->nlmsg_type = RTM_DELTFILTER;
- nlh->nlmsg_flags = NLM_F_REQUEST;
- flow_tcf_nl_ack(ctx, nlh, 0, NULL, NULL);
- if (dev_flow->tcf.tunnel) {
- assert(dev_flow->tcf.tunnel->vtep);
- flow_tcf_vtep_release(ctx,
- dev_flow->tcf.tunnel->vtep,
- dev_flow);
- dev_flow->tcf.tunnel->vtep = NULL;
- }
- dev_flow->tcf.applied = 0;
- }
-}
-
/**
* Remove flow from E-Switch and release resources of the device flow.
*
* Message received from Netlink.
* @param[out] data
* Pointer to data area to be filled by the parsing routine.
- * assumed to be a pinter to struct flow_tcf_stats_basic.
+ * assumed to be a pointer to struct flow_tcf_stats_basic.
*
* @return
* MNL_CB_OK value.
void *data,
struct rte_flow_error *error)
{
- struct flow_tcf_stats_basic sb_data = { 0 };
+ struct flow_tcf_stats_basic sb_data;
struct rte_flow_query_count *qc = data;
struct priv *priv = dev->data->dev_private;
struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
ssize_t ret;
assert(qc);
+ memset(&sb_data, 0, sizeof(sb_data));
dev_flow = LIST_FIRST(&flow->dev_flows);
/* E-Switch flow can't be expanded. */
assert(!LIST_NEXT(dev_flow, next));
tcm->tcm_parent = TC_H_INGRESS;
assert(sizeof(buf) >= nlh->nlmsg_len);
/* Ignore errors when qdisc is already absent. */
- if (flow_tcf_nl_ack(ctx, nlh, 0, NULL, NULL) &&
+ if (flow_tcf_nl_ack(ctx, nlh, NULL, NULL) &&
rte_errno != EINVAL && rte_errno != ENOENT)
return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
tcm->tcm_parent = TC_H_INGRESS;
mnl_attr_put_strz_check(nlh, sizeof(buf), TCA_KIND, "ingress");
assert(sizeof(buf) >= nlh->nlmsg_len);
- if (flow_tcf_nl_ack(ctx, nlh, 0, NULL, NULL))
+ if (flow_tcf_nl_ack(ctx, nlh, NULL, NULL))
return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"netlink: failed to create ingress"
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
struct priv *priv = dev->data->dev_private;
struct ibv_counters_init_attr init = {0};
- struct ibv_counter_attach_attr attach = {0};
+ struct ibv_counter_attach_attr attach;
int ret;
+ memset(&attach, 0, sizeof(attach));
counter->cs = mlx5_glue->create_counters(priv->ctx, &init);
if (!counter->cs) {
rte_errno = ENOTSUP;
int ret;
uint64_t action_flags = 0;
uint64_t item_flags = 0;
+ uint64_t last_item = 0;
uint8_t next_protocol = 0xff;
if (items == NULL)
error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
ret = mlx5_flow_validate_item_vlan(items, item_flags,
error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
- MLX5_FLOW_LAYER_INNER_VLAN) :
- (MLX5_FLOW_LAYER_OUTER_L2 |
- MLX5_FLOW_LAYER_OUTER_VLAN);
+ last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
+ MLX5_FLOW_LAYER_INNER_VLAN) :
+ (MLX5_FLOW_LAYER_OUTER_L2 |
+ MLX5_FLOW_LAYER_OUTER_VLAN);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
if (items->mask != NULL &&
((const struct rte_flow_item_ipv4 *)
items->mask)->hdr.next_proto_id) {
error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
if (items->mask != NULL &&
((const struct rte_flow_item_ipv6 *)
items->mask)->hdr.proto) {
error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
- MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
case RTE_FLOW_ITEM_TYPE_TCP:
ret = mlx5_flow_validate_item_tcp
error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
- MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
ret = mlx5_flow_validate_item_vxlan(items, item_flags,
error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_VXLAN;
+ last_item = MLX5_FLOW_LAYER_VXLAN;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
ret = mlx5_flow_validate_item_vxlan_gpe(items,
dev, error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+ last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
break;
case RTE_FLOW_ITEM_TYPE_GRE:
ret = mlx5_flow_validate_item_gre(items, item_flags,
next_protocol, error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_GRE;
+ last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_MPLS:
- ret = mlx5_flow_validate_item_mpls(items, item_flags,
- next_protocol,
- error);
+ ret = mlx5_flow_validate_item_mpls(dev, items,
+ item_flags,
+ last_item, error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_MPLS;
+ last_item = MLX5_FLOW_LAYER_MPLS;
break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "item not supported");
}
+ item_flags |= last_item;
}
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
switch (actions->type) {
DRV_LOG(DEBUG, "port %u inserting MR(%p) to global cache",
dev->data->port_id, (void *)mr);
for (n = 0; n < mr->ms_bmp_n; ) {
- struct mlx5_mr_cache entry = { 0, };
+ struct mlx5_mr_cache entry;
+ memset(&entry, 0, sizeof(entry));
/* Find a contiguous chunk and advance the index. */
n = mr_find_next_chunk(mr, &entry, n);
if (!entry.end)
if (mr->ms_n == 0)
continue;
for (n = 0; n < mr->ms_bmp_n; ) {
- struct mlx5_mr_cache ret = { 0, };
+ struct mlx5_mr_cache ret;
+ memset(&ret, 0, sizeof(ret));
n = mr_find_next_chunk(mr, &ret, n);
if (addr >= ret.start && addr < ret.end) {
/* Found. */
* Find out a contiguous virtual address chunk in use, to which the
* given address belongs, in order to register maximum range. In the
* best case where mempools are not dynamically recreated and
- * '--socket-mem' is speicified as an EAL option, it is very likely to
+ * '--socket-mem' is specified as an EAL option, it is very likely to
* have only one MR(LKey) per a socket and per a hugepage-size even
* though the system memory is highly fragmented.
*/
*/
for (n = 0; n < ms_n; ++n) {
uintptr_t start;
- struct mlx5_mr_cache ret = { 0, };
+ struct mlx5_mr_cache ret;
+ memset(&ret, 0, sizeof(ret));
start = data_re.start + n * msl->page_sz;
/* Exclude memsegs already registered by other MRs. */
if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
* @return
* Searched LKey on success, UINT32_MAX on no match.
*/
-uint32_t
+static uint32_t
mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
{
struct mlx5_txq_ctrl *txq_ctrl =
return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}
+/**
+ * Bottom-half of LKey search on Tx. If the address can't be found in the
+ * memseg list, register the mempool of the mbuf as externally allocated
+ * memory and retry the search there.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param mb
+ * Pointer to mbuf whose buffer address is looked up.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
+{
+ uintptr_t addr = (uintptr_t)mb->buf_addr;
+ uint32_t lkey;
+
+ lkey = mlx5_tx_addr2mr_bh(txq, addr);
+ if (lkey == UINT32_MAX && rte_errno == ENXIO) {
+ /* Mempool may have externally allocated memory. */
+ return mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
+ }
+ return lkey;
+}
+
/**
* Flush all of the local cache entries.
*
tmpl->rxq.mp = mp;
tmpl->rxq.stats.idx = idx;
tmpl->rxq.elts_n = log2above(desc);
+ tmpl->rxq.rq_repl_thresh =
+ MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
tmpl->rxq.elts =
(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
#ifndef RTE_ARCH_64
struct mlx5_ind_table_ibv *ind_tbl;
struct ibv_qp *qp;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- struct mlx5dv_qp_init_attr qp_init_attr = {0};
+ struct mlx5dv_qp_init_attr qp_init_attr;
#endif
int err;
return NULL;
}
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ memset(&qp_init_attr, 0, sizeof(qp_init_attr));
if (tunnel) {
qp_init_attr.comp_mask =
MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
uint16_t consumed_strd; /* Number of consumed strides in WQE. */
uint32_t rq_pi;
uint32_t cq_ci;
+ uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
volatile void *wqes;
void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
-uint32_t mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr);
+uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb);
uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
struct rte_mempool *mp);
* Address of the lock to use for that UAR access.
*/
static __rte_always_inline void
-__mlx5_uar_write64_relaxed(uint64_t val, volatile void *addr,
+__mlx5_uar_write64_relaxed(uint64_t val, void *addr,
rte_spinlock_t *lock __rte_unused)
{
#ifdef RTE_ARCH_64
- rte_write64_relaxed(val, addr);
+ *(uint64_t *)addr = val;
#else /* !RTE_ARCH_64 */
rte_spinlock_lock(lock);
- rte_write32_relaxed(val, addr);
+ *(uint32_t *)addr = val;
rte_io_wmb();
- rte_write32_relaxed(val >> 32,
- (volatile void *)((volatile char *)addr + 4));
+ *((uint32_t *)addr + 1) = val >> 32;
rte_spinlock_unlock(lock);
#endif
}
* Address of the lock to use for that UAR access.
*/
static __rte_always_inline void
-__mlx5_uar_write64(uint64_t val, volatile void *addr, rte_spinlock_t *lock)
+__mlx5_uar_write64(uint64_t val, void *addr, rte_spinlock_t *lock)
{
rte_io_wmb();
__mlx5_uar_write64_relaxed(val, addr, lock);
* @return
* Memory pool where data is located for given mbuf.
*/
-static struct rte_mempool *
+static inline struct rte_mempool *
mlx5_mb2mp(struct rte_mbuf *buf)
{
if (unlikely(RTE_MBUF_INDIRECT(buf)))
* Searched LKey on success, UINT32_MAX on no match.
*/
static __rte_always_inline uint32_t
-mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr)
+mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
{
struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ uintptr_t addr = (uintptr_t)mb->buf_addr;
uint32_t lkey;
/* Check generation bit to see if there's any change on existing MRs. */
MLX5_MR_CACHE_N, addr);
if (likely(lkey != UINT32_MAX))
return lkey;
- /* Take slower bottom-half (binary search) on miss. */
- return mlx5_tx_addr2mr_bh(txq, addr);
-}
-
-static __rte_always_inline uint32_t
-mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
-{
- uintptr_t addr = (uintptr_t)mb->buf_addr;
- uint32_t lkey = mlx5_tx_addr2mr(txq, addr);
-
- if (likely(lkey != UINT32_MAX))
- return lkey;
- if (rte_errno == ENXIO) {
- /* Mempool may have externally allocated memory. */
- lkey = mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
- }
- return lkey;
+ /* Take slower bottom-half on miss. */
+ return mlx5_tx_mb2mr_bh(txq, mb);
}
/**
* N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
*/
repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
- if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
+ if (repl_n >= rxq->rq_repl_thresh)
mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
/* See if there're unreturned mbufs from compressed CQE. */
rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
* N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
*/
repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
- if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
+ if (repl_n >= rxq->rq_repl_thresh)
mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
/* See if there're unreturned mbufs from compressed CQE. */
rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct priv *priv = dev->data->dev_private;
- struct rte_eth_stats tmp = {0};
+ struct rte_eth_stats tmp;
unsigned int i;
unsigned int idx;
+ memset(&tmp, 0, sizeof(tmp));
/* Add software counters. */
for (i = 0; (i != priv->rxqs_n); ++i) {
struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
struct mrvl_priv *priv = dev->data->dev_private;
struct mrvl_mtr_profile *profile;
struct mrvl_mtr *mtr;
- int ret, enabled;
+ int ret, enabled = 0;
if (!priv->ppio)
return -rte_mtr_error_set(error, EPERM,
res = -EINVAL;
goto parse_error;
}
- if (pnum > qnum) {
- /*
- * We don't poll on event ports
- * that do not have any queues assigned.
- */
- pnum = qnum;
- PMD_INIT_LOG(INFO,
- "reducing number of active event ports to %d", pnum);
- }
+
+ /* Enable all queues available */
for (i = 0; i < qnum; i++) {
res = rte_event_queue_setup(evdev, i, NULL);
if (res < 0) {
}
}
+ /* Enable all ports available */
for (i = 0; i < pnum; i++) {
res = rte_event_port_setup(evdev, i, NULL);
if (res < 0) {
i, res);
goto parse_error;
}
+ }
+
+ /*
+ * Do 1:1 links for ports & queues. All queues would be mapped to
+ * one port. If there are more ports than queues, then some ports
+ * won't be linked to any queue.
+ */
+ for (i = 0; i < qnum; i++) {
/* Link one queue to one event port */
qlist = i;
res = rte_event_port_link(evdev, i, &qlist, NULL, 1);
};
struct pcap_rx_queue {
- pcap_t *pcap;
- uint16_t in_port;
+ uint16_t port_id;
+ uint16_t queue_id;
struct rte_mempool *mb_pool;
struct queue_stat rx_stat;
char name[PATH_MAX];
};
struct pcap_tx_queue {
- pcap_dumper_t *dumper;
- pcap_t *pcap;
+ uint16_t port_id;
+ uint16_t queue_id;
struct queue_stat tx_stat;
char name[PATH_MAX];
char type[ETH_PCAP_ARG_MAXLEN];
struct pmd_internals {
struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
+ char devargs[ETH_PCAP_ARG_MAXLEN];
struct ether_addr eth_addr;
int if_index;
int single_iface;
int phy_mac;
};
+struct pmd_process_private {
+ pcap_t *rx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
+ pcap_t *tx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
+ pcap_dumper_t *tx_dumper[RTE_PMD_PCAP_MAX_QUEUES];
+};
+
struct pmd_devargs {
unsigned int num_of_queue;
struct devargs_queue {
{
unsigned int i;
struct pcap_pkthdr header;
+ struct pmd_process_private *pp;
const u_char *packet;
struct rte_mbuf *mbuf;
struct pcap_rx_queue *pcap_q = queue;
uint16_t num_rx = 0;
uint16_t buf_size;
uint32_t rx_bytes = 0;
+ pcap_t *pcap;
+
+ pp = rte_eth_devices[pcap_q->port_id].process_private;
+ pcap = pp->rx_pcap[pcap_q->queue_id];
- if (unlikely(pcap_q->pcap == NULL || nb_pkts == 0))
+ if (unlikely(pcap == NULL || nb_pkts == 0))
return 0;
/* Reads the given number of packets from the pcap file one by one
*/
for (i = 0; i < nb_pkts; i++) {
/* Get the next PCAP packet */
- packet = pcap_next(pcap_q->pcap, &header);
+ packet = pcap_next(pcap, &header);
if (unlikely(packet == NULL))
break;
}
mbuf->pkt_len = (uint16_t)header.caplen;
- mbuf->port = pcap_q->in_port;
+ mbuf->port = pcap_q->port_id;
bufs[num_rx] = mbuf;
num_rx++;
rx_bytes += header.caplen;
{
unsigned int i;
struct rte_mbuf *mbuf;
+ struct pmd_process_private *pp;
struct pcap_tx_queue *dumper_q = queue;
uint16_t num_tx = 0;
uint32_t tx_bytes = 0;
struct pcap_pkthdr header;
+ pcap_dumper_t *dumper;
+
+ pp = rte_eth_devices[dumper_q->port_id].process_private;
+ dumper = pp->tx_dumper[dumper_q->queue_id];
- if (dumper_q->dumper == NULL || nb_pkts == 0)
+ if (dumper == NULL || nb_pkts == 0)
return 0;
/* writes the nb_pkts packets to the previously opened pcap file
header.caplen = header.len;
if (likely(mbuf->nb_segs == 1)) {
- pcap_dump((u_char *)dumper_q->dumper, &header,
+ pcap_dump((u_char *)dumper, &header,
rte_pktmbuf_mtod(mbuf, void*));
} else {
if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
eth_pcap_gather_data(tx_pcap_data, mbuf);
- pcap_dump((u_char *)dumper_q->dumper, &header,
+ pcap_dump((u_char *)dumper, &header,
tx_pcap_data);
} else {
PMD_LOG(ERR,
* process stops and to make sure the pcap file is actually written,
* we flush the pcap dumper within each burst.
*/
- pcap_dump_flush(dumper_q->dumper);
+ pcap_dump_flush(dumper);
dumper_q->tx_stat.pkts += num_tx;
dumper_q->tx_stat.bytes += tx_bytes;
dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;
unsigned int i;
int ret;
struct rte_mbuf *mbuf;
+ struct pmd_process_private *pp;
struct pcap_tx_queue *tx_queue = queue;
uint16_t num_tx = 0;
uint32_t tx_bytes = 0;
+ pcap_t *pcap;
- if (unlikely(nb_pkts == 0 || tx_queue->pcap == NULL))
+ pp = rte_eth_devices[tx_queue->port_id].process_private;
+ pcap = pp->tx_pcap[tx_queue->queue_id];
+
+ if (unlikely(nb_pkts == 0 || pcap == NULL))
return 0;
for (i = 0; i < nb_pkts; i++) {
mbuf = bufs[i];
if (likely(mbuf->nb_segs == 1)) {
- ret = pcap_sendpacket(tx_queue->pcap,
+ ret = pcap_sendpacket(pcap,
rte_pktmbuf_mtod(mbuf, u_char *),
mbuf->pkt_len);
} else {
if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
eth_pcap_gather_data(tx_pcap_data, mbuf);
- ret = pcap_sendpacket(tx_queue->pcap,
+ ret = pcap_sendpacket(pcap,
tx_pcap_data, mbuf->pkt_len);
} else {
PMD_LOG(ERR,
{
unsigned int i;
struct pmd_internals *internals = dev->data->dev_private;
+ struct pmd_process_private *pp = dev->process_private;
struct pcap_tx_queue *tx;
struct pcap_rx_queue *rx;
tx = &internals->tx_queue[0];
rx = &internals->rx_queue[0];
- if (!tx->pcap && strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
- if (open_single_iface(tx->name, &tx->pcap) < 0)
+ if (!pp->tx_pcap[0] &&
+ strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
+ if (open_single_iface(tx->name, &pp->tx_pcap[0]) < 0)
return -1;
- rx->pcap = tx->pcap;
+ pp->rx_pcap[0] = pp->tx_pcap[0];
}
goto status_up;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
tx = &internals->tx_queue[i];
- if (!tx->dumper &&
+ if (!pp->tx_dumper[i] &&
strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
- if (open_single_tx_pcap(tx->name, &tx->dumper) < 0)
+ if (open_single_tx_pcap(tx->name,
+ &pp->tx_dumper[i]) < 0)
return -1;
- } else if (!tx->pcap &&
+ } else if (!pp->tx_pcap[i] &&
strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
- if (open_single_iface(tx->name, &tx->pcap) < 0)
+ if (open_single_iface(tx->name, &pp->tx_pcap[i]) < 0)
return -1;
}
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rx = &internals->rx_queue[i];
- if (rx->pcap != NULL)
+ if (pp->rx_pcap[i] != NULL)
continue;
if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
- if (open_single_rx_pcap(rx->name, &rx->pcap) < 0)
+ if (open_single_rx_pcap(rx->name, &pp->rx_pcap[i]) < 0)
return -1;
} else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
- if (open_single_iface(rx->name, &rx->pcap) < 0)
+ if (open_single_iface(rx->name, &pp->rx_pcap[i]) < 0)
return -1;
}
}
{
unsigned int i;
struct pmd_internals *internals = dev->data->dev_private;
- struct pcap_tx_queue *tx;
- struct pcap_rx_queue *rx;
+ struct pmd_process_private *pp = dev->process_private;
/* Special iface case. Single pcap is open and shared between tx/rx. */
if (internals->single_iface) {
- tx = &internals->tx_queue[0];
- rx = &internals->rx_queue[0];
- pcap_close(tx->pcap);
- tx->pcap = NULL;
- rx->pcap = NULL;
+ pcap_close(pp->tx_pcap[0]);
+ pp->tx_pcap[0] = NULL;
+ pp->rx_pcap[0] = NULL;
goto status_down;
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- tx = &internals->tx_queue[i];
-
- if (tx->dumper != NULL) {
- pcap_dump_close(tx->dumper);
- tx->dumper = NULL;
+ if (pp->tx_dumper[i] != NULL) {
+ pcap_dump_close(pp->tx_dumper[i]);
+ pp->tx_dumper[i] = NULL;
}
- if (tx->pcap != NULL) {
- pcap_close(tx->pcap);
- tx->pcap = NULL;
+ if (pp->tx_pcap[i] != NULL) {
+ pcap_close(pp->tx_pcap[i]);
+ pp->tx_pcap[i] = NULL;
}
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rx = &internals->rx_queue[i];
-
- if (rx->pcap != NULL) {
- pcap_close(rx->pcap);
- rx->pcap = NULL;
+ if (pp->rx_pcap[i] != NULL) {
+ pcap_close(pp->rx_pcap[i]);
+ pp->rx_pcap[i] = NULL;
}
}
struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];
pcap_q->mb_pool = mb_pool;
+ pcap_q->port_id = dev->data->port_id;
+ pcap_q->queue_id = rx_queue_id;
dev->data->rx_queues[rx_queue_id] = pcap_q;
- pcap_q->in_port = dev->data->port_id;
return 0;
}
const struct rte_eth_txconf *tx_conf __rte_unused)
{
struct pmd_internals *internals = dev->data->dev_private;
+ struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id];
- dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
+ pcap_q->port_id = dev->data->port_id;
+ pcap_q->queue_id = tx_queue_id;
+ dev->data->tx_queues[tx_queue_id] = pcap_q;
return 0;
}
struct rte_eth_dev **eth_dev)
{
struct rte_eth_dev_data *data;
+ struct pmd_process_private *pp;
unsigned int numa_node = vdev->device.numa_node;
PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d",
numa_node);
+ pp = (struct pmd_process_private *)
+ rte_zmalloc(NULL, sizeof(struct pmd_process_private),
+ RTE_CACHE_LINE_SIZE);
+
+ if (pp == NULL) {
+ PMD_LOG(ERR,
+ "Failed to allocate memory for process private");
+ return -1;
+ }
+
/* reserve an ethdev entry */
*eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));
- if (!(*eth_dev))
+ if (!(*eth_dev)) {
+ rte_free(pp);
return -1;
-
+ }
+ (*eth_dev)->process_private = pp;
/* now put it all together
* - store queue data in internals,
* - store numa_node info in eth_dev
*/
(*eth_dev)->dev_ops = &ops;
+ strlcpy((*internals)->devargs, rte_vdev_device_args(vdev),
+ ETH_PCAP_ARG_MAXLEN);
+
return 0;
}
struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
{
+ struct pmd_process_private *pp;
unsigned int i;
/* do some parameter checking */
eth_dev) < 0)
return -1;
+ pp = (*eth_dev)->process_private;
for (i = 0; i < nb_rx_queues; i++) {
struct pcap_rx_queue *rx = &(*internals)->rx_queue[i];
struct devargs_queue *queue = &rx_queues->queue[i];
- rx->pcap = queue->pcap;
+ pp->rx_pcap[i] = queue->pcap;
snprintf(rx->name, sizeof(rx->name), "%s", queue->name);
snprintf(rx->type, sizeof(rx->type), "%s", queue->type);
}
struct pcap_tx_queue *tx = &(*internals)->tx_queue[i];
struct devargs_queue *queue = &tx_queues->queue[i];
- tx->dumper = queue->dumper;
- tx->pcap = queue->pcap;
+ pp->tx_dumper[i] = queue->dumper;
+ pp->tx_pcap[i] = queue->pcap;
snprintf(tx->name, sizeof(tx->name), "%s", queue->name);
snprintf(tx->type, sizeof(tx->type), "%s", queue->type);
}
struct rte_kvargs *kvlist;
struct pmd_devargs pcaps = {0};
struct pmd_devargs dumpers = {0};
- struct rte_eth_dev *eth_dev;
+ struct rte_eth_dev *eth_dev = NULL;
+ struct pmd_internals *internal;
int single_iface = 0;
int ret;
PMD_LOG(ERR, "Failed to probe %s", name);
return -1;
}
- /* TODO: request info from primary to set up Rx and Tx */
- eth_dev->dev_ops = &ops;
- eth_dev->device = &dev->device;
- rte_eth_dev_probing_finish(eth_dev);
- return 0;
- }
- kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
- if (kvlist == NULL)
- return -1;
+ internal = eth_dev->data->dev_private;
+
+ kvlist = rte_kvargs_parse(internal->devargs, valid_arguments);
+ if (kvlist == NULL)
+ return -1;
+ } else {
+ kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
+ valid_arguments);
+ if (kvlist == NULL)
+ return -1;
+ }
/*
* If iface argument is passed we open the NICs and use them for
goto free_kvlist;
create_eth:
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ struct pmd_process_private *pp;
+ unsigned int i;
+
+ internal = eth_dev->data->dev_private;
+ pp = (struct pmd_process_private *)
+ rte_zmalloc(NULL,
+ sizeof(struct pmd_process_private),
+ RTE_CACHE_LINE_SIZE);
+
+ if (pp == NULL) {
+ PMD_LOG(ERR,
+ "Failed to allocate memory for process private");
+ rte_kvargs_free(kvlist);
+ return -1;
+ }
+
+ eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
+
+ /* setup process private */
+ for (i = 0; i < pcaps.num_of_queue; i++)
+ pp->rx_pcap[i] = pcaps.queue[i].pcap;
+
+ for (i = 0; i < dumpers.num_of_queue; i++) {
+ pp->tx_dumper[i] = dumpers.queue[i].dumper;
+ pp->tx_pcap[i] = dumpers.queue[i].pcap;
+ }
+
+ eth_dev->process_private = pp;
+ eth_dev->rx_pkt_burst = eth_pcap_rx;
+ if (is_tx_pcap)
+ eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
+ else
+ eth_dev->tx_pkt_burst = eth_pcap_tx;
+
+ rte_eth_dev_probing_finish(eth_dev);
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
ret = eth_from_pcaps(dev, &pcaps, pcaps.num_of_queue, &dumpers,
dumpers.num_of_queue, single_iface, is_tx_pcap);
eth_dev->data->mac_addrs = NULL;
}
+ rte_free(eth_dev->process_private);
rte_eth_dev_release_port(eth_dev);
return 0;
hdr->tx_offload = pkt->tx_offload;
hdr->hash = pkt->hash;
- hdr->ol_flags = pkt->ol_flags;
-
__rte_mbuf_sanity_check(hdr, 1);
return hdr;
}
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
#include <unistd.h>
#include <stdbool.h>
#include <assert.h>
struct lcore_option *lo = &options.los[i];
struct vhost_crypto_info *info = options.infos[i];
+ if (!info)
+ continue;
+
rte_mempool_free(info->cop_pool);
rte_mempool_free(info->sess_pool);
info->nb_vids = lo->nb_sockets;
rte_cryptodev_info_get(info->cid, &dev_info);
+ if (options.zero_copy == RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE) {
+#define VHOST_CRYPTO_CDEV_NAME_AESNI_MB_PMD crypto_aesni_mb
+#define VHOST_CRYPTO_CDEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
+ if (strstr(dev_info.driver_name,
+ RTE_STR(VHOST_CRYPTO_CDEV_NAME_AESNI_MB_PMD)) ||
+ strstr(dev_info.driver_name,
+ RTE_STR(VHOST_CRYPTO_CDEV_NAME_AESNI_GCM_PMD))) {
+ RTE_LOG(ERR, USER1, "Cannot enable zero-copy in %s\n",
+ dev_info.driver_name);
+ ret = -EPERM;
+ goto error_exit;
+ }
+ }
+
if (dev_info.max_nb_queue_pairs < info->qid + 1) {
RTE_LOG(ERR, USER1, "Number of queues cannot over %u",
dev_info.max_nb_queue_pairs);
const struct rte_memseg *ms;
/* for IOVA_VA, it's always contiguous */
- if (rte_eal_iova_mode() == RTE_IOVA_VA)
+ if (rte_eal_iova_mode() == RTE_IOVA_VA && !msl->external)
return true;
/* for legacy memory, it's always contiguous */
int
eal_create_runtime_dir(void);
-/* returns runtime dir */
-const char *
-rte_eal_get_runtime_dir(void);
-
#define RUNTIME_CONFIG_FNAME "config"
static inline const char *
eal_runtime_config_path(void)
* 0-15 = release candidates
* 16 = release
*/
-#define RTE_VER_RELEASE 3
+#define RTE_VER_RELEASE 4
/**
* Macro to compute a version number usable for comparisons
static int
next_elem_is_adjacent(struct malloc_elem *elem)
{
- return elem->next == RTE_PTR_ADD(elem, elem->size);
+ return elem->next == RTE_PTR_ADD(elem, elem->size) &&
+ elem->next->msl == elem->msl;
}
static int
prev_elem_is_adjacent(struct malloc_elem *elem)
{
- return elem == RTE_PTR_ADD(elem->prev, elem->prev->size);
+ return elem == RTE_PTR_ADD(elem->prev, elem->prev->size) &&
+ elem->prev->msl == elem->msl;
}
/*
if (bus == NULL) {
RTE_LOG(ERR, EAL, "Cannot find bus (%s)\n",
busname);
- return;
+ goto failure_handle_err;
}
dev = bus->find_device(NULL, cmp_dev_name,
if (dev == NULL) {
RTE_LOG(ERR, EAL, "Cannot find device (%s) on "
"bus (%s)\n", uevent.devname, busname);
- return;
+ goto failure_handle_err;
}
ret = bus->hot_unplug_handler(dev);
- rte_spinlock_unlock(&failure_handle_lock);
if (ret) {
RTE_LOG(ERR, EAL, "Can not handle hot-unplug "
"for device (%s)\n", dev->name);
- return;
}
+ rte_spinlock_unlock(&failure_handle_lock);
}
rte_dev_event_callback_process(uevent.devname, uevent.type);
}
+
+ return;
+
+failure_handle_err:
+ rte_spinlock_unlock(&failure_handle_lock);
}
int __rte_experimental
munmap(addr, alloc_sz);
unmapped:
flags = MAP_FIXED;
-#ifdef RTE_ARCH_PPC_64
- flags |= MAP_HUGETLB;
-#endif
new_addr = eal_get_virtual_area(addr, &alloc_sz, alloc_sz, 0, flags);
if (new_addr != addr) {
if (new_addr != NULL)
void *addr;
int flags = 0;
-#ifdef RTE_ARCH_PPC_64
- flags |= MAP_HUGETLB;
-#endif
-
page_sz = msl->page_sz;
mem_sz = page_sz * msl->memseg_arr.len;
offline_cpu_socket, 0);
if (r == NULL) {
RTE_LOG(ERR, EFD, "memory allocation failed\n");
- goto error_unlock_exit;
+ rte_efd_free(table);
+ return NULL;
}
/* Populate free slots ring. Entry zero is reserved for key misses. */
LIBABIVER := 1
# build flags
-CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lrte_eal -lrte_mempool
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-allow_experimental_apis = true
sources = files('rte_security.c')
headers = files('rte_security.h', 'rte_security_driver.h')
deps += ['mempool', 'cryptodev']
#include "rte_security_driver.h"
struct rte_security_session *
-__rte_experimental rte_security_session_create(struct rte_security_ctx *instance,
+rte_security_session_create(struct rte_security_ctx *instance,
struct rte_security_session_conf *conf,
struct rte_mempool *mp)
{
return sess;
}
-int __rte_experimental
+int
rte_security_session_update(struct rte_security_ctx *instance,
struct rte_security_session *sess,
struct rte_security_session_conf *conf)
return instance->ops->session_update(instance->device, sess, conf);
}
-unsigned int __rte_experimental
+unsigned int
rte_security_session_get_size(struct rte_security_ctx *instance)
{
RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->session_get_size, 0);
return instance->ops->session_get_size(instance->device);
}
-int __rte_experimental
+int
rte_security_session_stats_get(struct rte_security_ctx *instance,
struct rte_security_session *sess,
struct rte_security_stats *stats)
return instance->ops->session_stats_get(instance->device, sess, stats);
}
-int __rte_experimental
+int
rte_security_session_destroy(struct rte_security_ctx *instance,
struct rte_security_session *sess)
{
return ret;
}
-int __rte_experimental
+int
rte_security_set_pkt_metadata(struct rte_security_ctx *instance,
struct rte_security_session *sess,
struct rte_mbuf *m, void *params)
sess, m, params);
}
-void * __rte_experimental
+void *
rte_security_get_userdata(struct rte_security_ctx *instance, uint64_t md)
{
void *userdata = NULL;
return userdata;
}
-const struct rte_security_capability * __rte_experimental
+const struct rte_security_capability *
rte_security_capabilities_get(struct rte_security_ctx *instance)
{
RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->capabilities_get, NULL);
return instance->ops->capabilities_get(instance->device);
}
-const struct rte_security_capability * __rte_experimental
+const struct rte_security_capability *
rte_security_capability_get(struct rte_security_ctx *instance,
struct rte_security_capability_idx *idx)
{
/**
* @file rte_security.h
- * @b EXPERIMENTAL: this API may change without prior notice
*
* RTE Security Common Definitions
*
* - On success, pointer to session
* - On failure, NULL
*/
-struct rte_security_session * __rte_experimental
+struct rte_security_session *
rte_security_session_create(struct rte_security_ctx *instance,
struct rte_security_session_conf *conf,
struct rte_mempool *mp);
* - On success returns 0
* - On failure return errno
*/
-int __rte_experimental
+int
rte_security_session_update(struct rte_security_ctx *instance,
struct rte_security_session *sess,
struct rte_security_session_conf *conf);
* - Size of the private data, if successful
* - 0 if device is invalid or does not support the operation.
*/
-unsigned int __rte_experimental
+unsigned int
rte_security_session_get_size(struct rte_security_ctx *instance);
/**
* - -EINVAL if session is NULL.
* - -EBUSY if not all device private data has been freed.
*/
-int __rte_experimental
+int
rte_security_session_destroy(struct rte_security_ctx *instance,
struct rte_security_session *sess);
* - On success, zero.
* - On failure, a negative value.
*/
-int __rte_experimental
+int
rte_security_set_pkt_metadata(struct rte_security_ctx *instance,
struct rte_security_session *sess,
struct rte_mbuf *mb, void *params);
* - On success, userdata
* - On failure, NULL
*/
-void * __rte_experimental
+void *
rte_security_get_userdata(struct rte_security_ctx *instance, uint64_t md);
/**
* @param sym_op crypto operation
* @param sess security session
*/
-static inline int __rte_experimental
+static inline int
__rte_security_attach_session(struct rte_crypto_sym_op *sym_op,
struct rte_security_session *sess)
{
return 0;
}
-static inline void * __rte_experimental
+static inline void *
get_sec_session_private_data(const struct rte_security_session *sess)
{
return sess->sess_private_data;
}
-static inline void __rte_experimental
+static inline void
set_sec_session_private_data(struct rte_security_session *sess,
void *private_data)
{
* @param op crypto operation
* @param sess security session
*/
-static inline int __rte_experimental
+static inline int
rte_security_attach_session(struct rte_crypto_op *op,
struct rte_security_session *sess)
{
* - On success return 0
* - On failure errno
*/
-int __rte_experimental
+int
rte_security_session_stats_get(struct rte_security_ctx *instance,
struct rte_security_session *sess,
struct rte_security_stats *stats);
* - Returns array of security capabilities.
* - Return NULL if no capabilities available.
*/
-const struct rte_security_capability * __rte_experimental
+const struct rte_security_capability *
rte_security_capabilities_get(struct rte_security_ctx *instance);
/**
* index criteria.
* - Return NULL if the capability not matched on security instance.
*/
-const struct rte_security_capability * __rte_experimental
+const struct rte_security_capability *
rte_security_capability_get(struct rte_security_ctx *instance,
struct rte_security_capability_idx *idx);
/**
* @file rte_security_driver.h
- * @b EXPERIMENTAL: this API may change without prior notice
*
* RTE Security Common Definitions
*
-EXPERIMENTAL {
+DPDK_18.11 {
global:
rte_security_attach_session;
# Copyright(c) 2017 Intel Corporation
project('DPDK', 'C',
- version: '18.11.0-rc3',
+ version: '18.11.0-rc4',
license: 'BSD',
default_options: ['buildtype=release', 'default_library=static'],
meson_version: '>= 0.41'
* 2. try to run secondary process without a corresponding primary process
* (while failing to run, it will also remove any unused hugepage files)
* 3. check if current process hugefiles are still in place and are locked
- * 4. run a primary process with memtest1 prefix
- * 5. check if memtest1 hugefiles are created
- * 6. run a primary process with memtest2 prefix
- * 7. check that only memtest2 hugefiles are present in the hugedir
+ * 4. run a primary process with memtest1 prefix in default and legacy
+ * mem mode
+ * 5. check if memtest1 hugefiles are created in case of legacy mem
+ * mode, and deleted in case of default mem mode
+ * 6. run a primary process with memtest2 prefix in default and legacy
+ * mem modes
+ * 7. check that memtest2 hugefiles are present in the hugedir after a
+ * run in legacy mode, and not present at all after run in default
+ * mem mode
*/
char prefix[PATH_MAX] = "";
const char *argv0[] = {prgname, mp_flag, "-c", "1", "-n", "2", "-m", DEFAULT_MEM_SIZE,
"--file-prefix=" memtest };
- /* primary process with memtest1 */
- const char *argv1[] = {prgname, "-c", "1", "-n", "2", "-m", DEFAULT_MEM_SIZE,
- "--file-prefix=" memtest1 };
+ /* primary process with memtest1 and default mem mode */
+ const char *argv1[] = {prgname, "-c", "1", "-n", "2", "-m",
+ DEFAULT_MEM_SIZE, "--file-prefix=" memtest1 };
- /* primary process with memtest2 */
- const char *argv2[] = {prgname, "-c", "1", "-n", "2", "-m", DEFAULT_MEM_SIZE,
- "--file-prefix=" memtest2 };
+ /* primary process with memtest1 and legacy mem mode */
+ const char *argv2[] = {prgname, "-c", "1", "-n", "2", "-m",
+ DEFAULT_MEM_SIZE, "--file-prefix=" memtest1,
+ "--legacy-mem" };
+
+ /* primary process with memtest2 and legacy mem mode */
+ const char *argv3[] = {prgname, "-c", "1", "-n", "2", "-m",
+ DEFAULT_MEM_SIZE, "--file-prefix=" memtest2,
+ "--legacy-mem" };
+
+ /* primary process with memtest2 and default mem mode */
+ const char *argv4[] = {prgname, "-c", "1", "-n", "2", "-m",
+ DEFAULT_MEM_SIZE, "--file-prefix=" memtest2 };
/* check if files for current prefix are present */
if (process_hugefiles(prefix, HUGEPAGE_CHECK_EXISTS) != 1) {
return -1;
}
+ /* we're running this process in default memory mode, which means it
+ * should clean up after itself on exit and leave no hugepages behind.
+ */
if (launch_proc(argv1) != 0) {
- printf("Error - failed to run with --file-prefix=%s\n", memtest);
+ printf("Error - failed to run with --file-prefix=%s\n",
+ memtest1);
return -1;
}
/* check if memtest1_map0 is present */
- if (process_hugefiles(memtest1, HUGEPAGE_CHECK_EXISTS) != 1) {
- printf("Error - hugepage files for %s were not created!\n", memtest1);
+ if (process_hugefiles(memtest1, HUGEPAGE_CHECK_EXISTS) != 0) {
+ printf("Error - hugepage files for %s were not deleted!\n",
+ memtest1);
return -1;
}
+ /* now, we're running a process under the same prefix, but with legacy
+ * mem mode - this should leave behind hugepage files.
+ */
if (launch_proc(argv2) != 0) {
- printf("Error - failed to run with --file-prefix=%s\n", memtest2);
+ printf("Error - failed to run with --file-prefix=%s\n",
+ memtest1);
+ return -1;
+ }
+
+ /* check if memtest1_map0 is present */
+ if (process_hugefiles(memtest1, HUGEPAGE_CHECK_EXISTS) != 1) {
+ printf("Error - hugepage files for %s were not created!\n",
+ memtest1);
+ return -1;
+ }
+
+ if (launch_proc(argv3) != 0) {
+ printf("Error - failed to run with --file-prefix=%s\n",
+ memtest2);
return -1;
}
/* check if hugefiles for memtest2 are present */
if (process_hugefiles(memtest2, HUGEPAGE_CHECK_EXISTS) != 1) {
- printf("Error - hugepage files for %s were not created!\n", memtest2);
+ printf("Error - hugepage files for %s were not created!\n",
+ memtest2);
+ return -1;
+ }
+
+ /* check if hugefiles for memtest1 are present */
+ if (process_hugefiles(memtest1, HUGEPAGE_CHECK_EXISTS) != 0) {
+ printf("Error - hugepage files for %s were not deleted!\n",
+ memtest1);
+ return -1;
+ }
+
+ /* this process will run in default mem mode, so it should not leave any
+ * hugepage files behind.
+ */
+ if (launch_proc(argv4) != 0) {
+ printf("Error - failed to run with --file-prefix=%s\n",
+ memtest2);
+ return -1;
+ }
+
+ /* check if hugefiles for memtest2 are present */
+ if (process_hugefiles(memtest2, HUGEPAGE_CHECK_EXISTS) != 0) {
+ printf("Error - hugepage files for %s were not deleted!\n",
+ memtest2);
return -1;
}
/* check if hugefiles for memtest1 are present */
if (process_hugefiles(memtest1, HUGEPAGE_CHECK_EXISTS) != 0) {
- printf("Error - hugepage files for %s were not deleted!\n", memtest1);
+ printf("Error - hugepage files for %s were not deleted!\n",
+ memtest1);
return -1;
}
printf("Check valid alarm interrupt full path\n");
if (test_interrupt_full_path_check(
- TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT) < 0) {
+ TEST_INTERRUPT_HANDLE_VALID_ALARM) < 0) {
printf("failure occurred during checking valid alarm "
"interrupt full path\n");
goto out;
#include <rte_launch.h>
#include <rte_ethdev.h>
#include <rte_eth_ring.h>
+#include <rte_bus_vdev.h>
#include "test.h"
static int
test_ring_pmd_perf(void)
{
+ char name[RTE_ETH_NAME_MAX_LEN];
+
r = rte_ring_create(RING_NAME, RING_SIZE, rte_socket_id(),
RING_F_SP_ENQ|RING_F_SC_DEQ);
if (r == NULL && (r = rte_ring_lookup(RING_NAME)) == NULL)
printf("\n### Testing using a single lcore ###\n");
test_bulk_enqueue_dequeue();
+ /* release port and ring resources */
+ rte_eth_dev_stop(ring_ethdev_port);
+ rte_eth_dev_get_name_by_port(ring_ethdev_port, name);
+ rte_vdev_uninit(name);
+ rte_ring_free(r);
return 0;
}
"Power management environment\n",
TEST_POWER_VM_LCORE_ID);
rte_power_unset_env();
- return -1;
+ return TEST_SKIPPED;
}
/* Test initialisation of previously initialised lcore */
def main():
'''program main function'''
+ # check if lspci is installed, suppress any output
+ with open(os.devnull, 'w') as devnull:
+ ret = subprocess.call(['which', 'lspci'],
+ stdout=devnull, stderr=devnull)
+ if ret != 0:
+ print("'lspci' not found - please install 'pciutils'")
+ sys.exit(1)
parse_args()
check_modules()
clear_data()