#include <vlib/log.h>
#include <vnet/ethernet/ethernet.h>
+#include <vnet/interface/rx_queue_funcs.h>
#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/cryptodev/cryptodev.h>
#include <vlib/vmbus/vmbus.h>
#include <rte_ring.h>
+#include <rte_vect.h>
#include <stdio.h>
#include <stdlib.h>
if (unformat (&input_vmbus, "%U", unformat_vlib_vmbus_addr,
&vmbus_addr))
{
- p = hash_get (dm->conf->device_config_index_by_vmbus_addr,
- vmbus_addr.as_u32[0]);
+ p = mhash_get (&dm->conf->device_config_index_by_vmbus_addr,
+ &vmbus_addr);
}
}
else
xd->rx_q_used = 1;
+ vec_validate_aligned (xd->rx_queues, xd->rx_q_used - 1,
+ CLIB_CACHE_LINE_BYTES);
+
xd->flags |= DPDK_DEVICE_FLAG_PMD;
/* workaround for drivers not setting driver_name */
switch (xd->pmd)
{
/* Drivers with valid speed_capa set */
+ case VNET_DPDK_PMD_I40E:
+ xd->flags |= DPDK_DEVICE_FLAG_INT_UNMASKABLE;
case VNET_DPDK_PMD_E1000EM:
case VNET_DPDK_PMD_IGB:
+ case VNET_DPDK_PMD_IGC:
case VNET_DPDK_PMD_IXGBE:
- case VNET_DPDK_PMD_I40E:
case VNET_DPDK_PMD_ICE:
xd->port_type = port_type_from_speed_capa (&dev_info);
xd->supported_flow_actions = VNET_FLOW_ACTION_MARK |
DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM;
}
-
+ xd->port_conf.intr_conf.rxq = 1;
break;
case VNET_DPDK_PMD_CXGBE:
case VNET_DPDK_PMD_MLX4:
break;
/* SR-IOV VFs */
+ case VNET_DPDK_PMD_I40EVF:
+ xd->flags |= DPDK_DEVICE_FLAG_INT_UNMASKABLE;
case VNET_DPDK_PMD_IGBVF:
case VNET_DPDK_PMD_IXGBEVF:
- case VNET_DPDK_PMD_I40EVF:
xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
if (dm->conf->no_tx_checksum_offload == 0)
{
DPDK_DEVICE_FLAG_TX_OFFLOAD |
DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM;
}
+ /* DPDK bug in multiqueue... */
+ /* xd->port_conf.intr_conf.rxq = 1; */
break;
/* iAVF */
case VNET_DPDK_PMD_IAVF:
- xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
- xd->supported_flow_actions = VNET_FLOW_ACTION_MARK |
- VNET_FLOW_ACTION_REDIRECT_TO_NODE |
+ xd->flags |= DPDK_DEVICE_FLAG_INT_UNMASKABLE;
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
+ xd->supported_flow_actions =
+ VNET_FLOW_ACTION_MARK | VNET_FLOW_ACTION_REDIRECT_TO_NODE |
VNET_FLOW_ACTION_REDIRECT_TO_QUEUE |
- VNET_FLOW_ACTION_BUFFER_ADVANCE |
- VNET_FLOW_ACTION_COUNT | VNET_FLOW_ACTION_DROP;
+ VNET_FLOW_ACTION_BUFFER_ADVANCE | VNET_FLOW_ACTION_COUNT |
+ VNET_FLOW_ACTION_DROP | VNET_FLOW_ACTION_RSS;
if (dm->conf->no_tx_checksum_offload == 0)
{
DPDK_DEVICE_FLAG_TX_OFFLOAD |
DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM;
}
- break;
+ /* DPDK bug in multiqueue... */
+ /* xd->port_conf.intr_conf.rxq = 1; */
+ break;
case VNET_DPDK_PMD_THUNDERX:
xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
case VNET_DPDK_PMD_ENA:
xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
xd->port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_SCATTER;
+ xd->port_conf.intr_conf.rxq = 1;
break;
case VNET_DPDK_PMD_DPAA2:
sw = vnet_get_hw_sw_interface (dm->vnet_main, xd->hw_if_index);
xd->sw_if_index = sw->sw_if_index;
- vnet_hw_interface_set_input_node (dm->vnet_main, xd->hw_if_index,
- dpdk_input_node.index);
+ vnet_hw_if_set_input_node (dm->vnet_main, xd->hw_if_index,
+ dpdk_input_node.index);
if (devconf->workers)
{
int i;
q = 0;
clib_bitmap_foreach (i, devconf->workers) {
- vnet_hw_interface_assign_rx_thread (dm->vnet_main, xd->hw_if_index, q++,
- vdm->first_worker_thread_index + i);
+ dpdk_rx_queue_t *rxq = vec_elt_at_index (xd->rx_queues, q);
+ rxq->queue_index = vnet_hw_if_register_rx_queue (
+ dm->vnet_main, xd->hw_if_index, q++,
+ vdm->first_worker_thread_index + i);
}
}
else
for (q = 0; q < xd->rx_q_used; q++)
{
- vnet_hw_interface_assign_rx_thread (dm->vnet_main, xd->hw_if_index, q, /* any */
- ~1);
+ dpdk_rx_queue_t *rxq = vec_elt_at_index (xd->rx_queues, q);
+ rxq->queue_index = vnet_hw_if_register_rx_queue (
+ dm->vnet_main, xd->hw_if_index, q, VNET_HW_IF_RXQ_THREAD_ANY);
}
+ vnet_hw_if_update_runtime_data (dm->vnet_main, xd->hw_if_index);
+
/*Get vnet hardware interface */
hi = vnet_get_hw_interface (dm->vnet_main, xd->hw_if_index);
/* Indicate ability to support L3 DMAC filtering and
* initialize interface to L3 non-promisc mode */
- hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_MAC_FILTER;
+ hi->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_MAC_FILTER;
ethernet_set_flags (dm->vnet_main, xd->hw_if_index,
ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
}
if (dm->conf->no_tx_checksum_offload == 0)
if (xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD && hi != NULL)
- hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD;
-
+ {
+ hi->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_TX_IP4_CKSUM |
+ VNET_HW_INTERFACE_CAP_SUPPORTS_TX_TCP_CKSUM |
+ VNET_HW_INTERFACE_CAP_SUPPORTS_TX_UDP_CKSUM;
+ }
if (devconf->tso == DPDK_DEVICE_TSO_ON && hi != NULL)
{
/*tcp_udp checksum must be enabled*/
if ((dm->conf->enable_tcp_udp_checksum) &&
- (hi->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD))
+ (hi->caps & VNET_HW_INTERFACE_CAP_SUPPORTS_TX_CKSUM))
{
- hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO;
- xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_UDP_TSO;
+ hi->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO |
+ VNET_HW_INTERFACE_CAP_SUPPORTS_UDP_GSO;
+ xd->port_conf.txmode.offloads |=
+ DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_UDP_TSO;
}
else
clib_warning ("%s: TCP/UDP checksum offload must be enabled",
dpdk_device_config_t *devconf = 0;
if (num_whitelisted)
{
- uword *p = hash_get (conf->device_config_index_by_vmbus_addr,
- addr->as_u32[0]);
+ uword *p =
+ mhash_get (&conf->device_config_index_by_vmbus_addr, addr);
if (!p)
{
/* No devices blacklisted, but have whitelisted. blacklist all
* non-whitelisted */
pool_get (conf->dev_confs, devconf);
- hash_set (conf->device_config_index_by_vmbus_addr,
- addr->as_u32[0], devconf - conf->dev_confs);
+ mhash_set (&conf->device_config_index_by_vmbus_addr, addr,
+ devconf - conf->dev_confs, 0);
devconf->vmbus_addr = *addr;
devconf->dev_addr_type = VNET_DEV_ADDR_VMBUS;
devconf->is_blacklisted = 1;
/* Enforce Device blacklist by vmbus_addr */
for (i = 0; i < vec_len (conf->blacklist_by_vmbus_addr); i++)
{
- u32 vmbus_as_u32 = conf->blacklist_by_vmbus_addr[i];
- if (vmbus_as_u32 == addr->as_u32[0])
+ vlib_vmbus_addr_t *a1 = &conf->blacklist_by_vmbus_addr[i];
+ vlib_vmbus_addr_t *a2 = addr;
+ if (memcmp (a1, a2, sizeof (vlib_vmbus_addr_t)) == 0)
{
if (devconf == 0)
{
/* Device not whitelisted */
pool_get (conf->dev_confs, devconf);
- hash_set (conf->device_config_index_by_vmbus_addr,
- addr->as_u32[0], devconf - conf->dev_confs);
+ mhash_set (&conf->device_config_index_by_vmbus_addr, addr,
+ devconf - conf->dev_confs, 0);
devconf->vmbus_addr = *addr;
devconf->dev_addr_type = VNET_DEV_ADDR_VMBUS;
devconf->is_blacklisted = 1;
if (devconf == 0)
{
pool_get (conf->dev_confs, devconf);
- hash_set (conf->device_config_index_by_vmbus_addr,
- addr->as_u32[0], devconf - conf->dev_confs);
+ mhash_set (&conf->device_config_index_by_vmbus_addr, addr,
+ devconf - conf->dev_confs, 0);
devconf->vmbus_addr = *addr;
}
devconf->dev_addr_type = VNET_DEV_ADDR_VMBUS;
/* *INDENT-ON* */
}
+/* Unformat callback for the "max-simd-bitwidth" startup.conf knob.
+ *
+ * Parses a single unsigned integer from INPUT into the uword pointed to
+ * by the first va_arg.  Only the values matching DPDK_MAX_SIMD_BITWIDTH_256
+ * or DPDK_MAX_SIMD_BITWIDTH_512 are accepted; anything else (or a
+ * non-numeric token) is rejected.
+ *
+ * Returns 1 on successful parse, 0 on failure (standard unformat-function
+ * convention).
+ *
+ * NOTE(review): if INPUT is already at end-of-input the loop body never
+ * runs and the function returns 1 without writing *max_simd_bitwidth —
+ * callers presumably pre-initialize the destination; confirm against the
+ * "max-simd-bitwidth %U" call site.
+ */
+uword
+unformat_max_simd_bitwidth (unformat_input_t *input, va_list *va)
+{
+  uword *max_simd_bitwidth = va_arg (*va, uword *);
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      /* Must be a plain unsigned integer token. */
+      if (!unformat (input, "%u", max_simd_bitwidth))
+	goto error;
+
+      /* Restrict to the two SIMD widths DPDK can be clamped to. */
+      if (*max_simd_bitwidth != DPDK_MAX_SIMD_BITWIDTH_256 &&
+	  *max_simd_bitwidth != DPDK_MAX_SIMD_BITWIDTH_512)
+	goto error;
+    }
+  return 1;
+error:
+  return 0;
+}
+
static clib_error_t *
dpdk_device_config (dpdk_config_main_t *conf, void *addr,
dpdk_device_addr_type_t addr_type, unformat_input_t *input,
}
else if (addr_type == VNET_DEV_ADDR_VMBUS)
{
- p = hash_get (conf->device_config_index_by_vmbus_addr,
- ((vlib_vmbus_addr_t *) (addr))->as_u32[0]);
+ p = mhash_get (&conf->device_config_index_by_vmbus_addr,
+ (vlib_vmbus_addr_t *) (addr));
if (!p)
{
pool_get (conf->dev_confs, devconf);
- hash_set (conf->device_config_index_by_vmbus_addr,
- ((vlib_vmbus_addr_t *) (addr))->as_u32[0],
- devconf - conf->dev_confs);
+ mhash_set (&conf->device_config_index_by_vmbus_addr, addr,
+ devconf - conf->dev_confs, 0);
}
else
return clib_error_return (
format (0, "%s/hugepages%c", vlib_unix_get_runtime_dir (), 0);
conf->device_config_index_by_pci_addr = hash_create (0, sizeof (uword));
- conf->device_config_index_by_vmbus_addr = hash_create (0, sizeof (uword));
+ mhash_init (&conf->device_config_index_by_vmbus_addr, sizeof (uword),
+ sizeof (vlib_vmbus_addr_t));
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
else if (unformat (input, "no-multi-seg"))
conf->no_multi_seg = 1;
-
+ else if (unformat (input, "max-simd-bitwidth %U",
+ unformat_max_simd_bitwidth, &conf->max_simd_bitwidth))
+ ;
else if (unformat (input, "dev default %U", unformat_vlib_cli_sub_input,
&sub_input))
{
else if (unformat (input, "blacklist %U", unformat_vlib_vmbus_addr,
&vmbus_addr))
{
- vec_add1 (conf->blacklist_by_vmbus_addr, vmbus_addr.as_u32[0]);
+ vec_add1 (conf->blacklist_by_vmbus_addr, vmbus_addr);
}
else
if (unformat
ret = rte_eal_init (vec_len (conf->eal_init_args),
(char **) conf->eal_init_args);
+ /* enable the AVX-512 vPMDs in DPDK */
+ if (clib_cpu_supports_avx512_bitalg () &&
+ conf->max_simd_bitwidth == DPDK_MAX_SIMD_BITWIDTH_DEFAULT)
+ rte_vect_set_max_simd_bitwidth (RTE_VECT_SIMD_512);
+ else if (conf->max_simd_bitwidth != DPDK_MAX_SIMD_BITWIDTH_DEFAULT)
+ rte_vect_set_max_simd_bitwidth (conf->max_simd_bitwidth ==
+ DPDK_MAX_SIMD_BITWIDTH_256 ?
+ RTE_VECT_SIMD_256 :
+ RTE_VECT_SIMD_512);
+
/* lazy umount hugepages */
umount2 ((char *) huge_dir_path, MNT_DETACH);
rmdir ((char *) huge_dir_path);
if (LINK_STATE_ELOGS)
{
- vlib_main_t *vm = vlib_get_main ();
ELOG_TYPE_DECLARE (e) =
{
.format =
u8 old_link_state;
u8 new_link_state;
} *ed;
- ed = ELOG_DATA (&vm->elog_main, e);
+ ed = ELOG_DATA (&vlib_global_main.elog_main, e);
ed->sw_if_index = xd->sw_if_index;
ed->admin_up = (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) != 0;
ed->old_link_state = (u8)
{
if (LINK_STATE_ELOGS)
{
- vlib_main_t *vm = vlib_get_main ();
-
ELOG_TYPE_DECLARE (e) =
{
.format =
u32 sw_if_index;
u32 flags;
} *ed;
- ed = ELOG_DATA (&vm->elog_main, e);
+ ed = ELOG_DATA (&vlib_global_main.elog_main, e);
ed->sw_if_index = xd->sw_if_index;
ed->flags = hw_flags;
}
if (error)
clib_error_report (error);
- error = dpdk_cryptodev_init (vm);
- if (error)
+ if (dpdk_cryptodev_init)
{
- vlib_log_warn (dpdk_main.log_cryptodev, "%U", format_clib_error, error);
- clib_error_free (error);
+ error = dpdk_cryptodev_init (vm);
+ if (error)
+ {
+ vlib_log_warn (dpdk_main.log_cryptodev, "%U", format_clib_error,
+ error);
+ clib_error_free (error);
+ }
}
tm->worker_thread_release = 1;