It is well known that for some NICs, masking
interrupts results in delaying desc writebacks,
which breaks poll mode. This fix introduces an
"int-unmaskable" dpdk device flag to identify such
devices (typically Intel FVL). For such devices,
interrupts are masked by a call to
file_update(...,UNIX_FILE_UPDATE_DELETE) instead
of rte_eth_dev_rx_intr_disable (...)
Change-Id: Ifbc701aebe8572319b7aae19382bd683a47fc3cf
Type: fix
Fixes: 19ff0c3699342b512c03362b3815df684a661f49
Signed-off-by: Mohammed Hawari <mohammed@hawari.fr>
else
{
xd->flags |= DPDK_DEVICE_FLAG_INT_SUPPORTED;
- rte_eth_dev_rx_intr_disable (xd->port_id, 0);
+ if (!(xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE))
+ rte_eth_dev_rx_intr_disable (xd->port_id, 0);
dpdk_log_info ("Probe for interrupt mode for device %U. Success.\n",
format_dpdk_device_name, xd->port_id);
}
rxq->clib_file_index = clib_file_add (&file_main, &f);
vnet_hw_if_set_rx_queue_file_index (vnm, rxq->queue_index,
rxq->clib_file_index);
+ if (xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE)
+ {
+ clib_file_main_t *fm = &file_main;
+ clib_file_t *f =
+ pool_elt_at_index (fm->file_pool, rxq->clib_file_index);
+ fm->file_update (f, UNIX_FILE_UPDATE_DELETE);
+ }
}
}
vnet_hw_if_update_runtime_data (vnm, xd->hw_if_index);
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>
+#include <vlib/unix/unix.h>
#define foreach_dpdk_tx_func_error \
_(BAD_RETVAL, "DPDK tx function returned an error") \
dpdk_main_t *xm = &dpdk_main;
vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
+ clib_file_main_t *fm = &file_main;
+ dpdk_rx_queue_t *rxq;
+ clib_file_t *f;
int rv = 0;
if (!(xd->flags & DPDK_DEVICE_FLAG_INT_SUPPORTED))
return clib_error_return (0, "unsupported op (is the interface up?)", rv);
- if (mode == VNET_HW_IF_RX_MODE_POLLING)
+ if (mode == VNET_HW_IF_RX_MODE_POLLING &&
+ !(xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE))
rv = rte_eth_dev_rx_intr_disable (xd->port_id, qid);
- else
+ else if (mode == VNET_HW_IF_RX_MODE_POLLING)
+ {
+ rxq = vec_elt_at_index (xd->rx_queues, qid);
+ f = pool_elt_at_index (fm->file_pool, rxq->clib_file_index);
+ fm->file_update (f, UNIX_FILE_UPDATE_DELETE);
+ }
+ else if (!(xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE))
rv = rte_eth_dev_rx_intr_enable (xd->port_id, qid);
+ else
+ {
+ rxq = vec_elt_at_index (xd->rx_queues, qid);
+ f = pool_elt_at_index (fm->file_pool, rxq->clib_file_index);
+ fm->file_update (f, UNIX_FILE_UPDATE_ADD);
+ }
if (rv)
return clib_error_return (0, "dpdk_interface_rx_mode_change err %d", rv);
return 0;
_ (10, INTEL_PHDR_CKSUM, "intel-phdr-cksum") \
_ (11, RX_FLOW_OFFLOAD, "rx-flow-offload") \
_ (12, RX_IP4_CKSUM, "rx-ip4-cksum") \
- _ (13, INT_SUPPORTED, "int-supported")
+ _ (13, INT_SUPPORTED, "int-supported") \
+ _ (14, INT_UNMASKABLE, "int-unmaskable")
enum
{
switch (xd->pmd)
{
/* Drivers with valid speed_capa set */
+ case VNET_DPDK_PMD_I40E:
+ xd->flags |= DPDK_DEVICE_FLAG_INT_UNMASKABLE;
case VNET_DPDK_PMD_E1000EM:
case VNET_DPDK_PMD_IGB:
case VNET_DPDK_PMD_IXGBE:
- case VNET_DPDK_PMD_I40E:
case VNET_DPDK_PMD_ICE:
xd->port_type = port_type_from_speed_capa (&dev_info);
xd->supported_flow_actions = VNET_FLOW_ACTION_MARK |
}
xd->port_conf.intr_conf.rxq = 1;
-
break;
case VNET_DPDK_PMD_CXGBE:
case VNET_DPDK_PMD_MLX4:
break;
/* SR-IOV VFs */
+ case VNET_DPDK_PMD_I40EVF:
+ xd->flags |= DPDK_DEVICE_FLAG_INT_UNMASKABLE;
case VNET_DPDK_PMD_IGBVF:
case VNET_DPDK_PMD_IXGBEVF:
- case VNET_DPDK_PMD_I40EVF:
xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
if (dm->conf->no_tx_checksum_offload == 0)
{
DPDK_DEVICE_FLAG_TX_OFFLOAD |
DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM;
}
- /*xd->port_conf.intr_conf.rxq = 1;*/
+ /* DPDK bug in multiqueue... */
+ /* xd->port_conf.intr_conf.rxq = 1; */
break;
/* iAVF */
case VNET_DPDK_PMD_IAVF:
- xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
+ xd->flags |= DPDK_DEVICE_FLAG_INT_UNMASKABLE;
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
xd->supported_flow_actions = VNET_FLOW_ACTION_MARK |
VNET_FLOW_ACTION_REDIRECT_TO_NODE |
VNET_FLOW_ACTION_REDIRECT_TO_QUEUE |