#include <vlib/log.h>
#include <vnet/ethernet/ethernet.h>
+#include <vnet/interface/rx_queue_funcs.h>
#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/cryptodev/cryptodev.h>
#include <vlib/vmbus/vmbus.h>
#include <rte_ring.h>
+#include <rte_vect.h>
#include <stdio.h>
#include <stdlib.h>
return VNET_DPDK_PORT_TYPE_UNKNOWN;
}
-static dpdk_port_type_t
-port_type_from_link_speed (u32 link_speed)
-{
- switch (link_speed)
- {
- case ETH_SPEED_NUM_1G:
- return VNET_DPDK_PORT_TYPE_ETH_1G;
- case ETH_SPEED_NUM_2_5G:
- return VNET_DPDK_PORT_TYPE_ETH_2_5G;
- case ETH_SPEED_NUM_5G:
- return VNET_DPDK_PORT_TYPE_ETH_5G;
- case ETH_SPEED_NUM_10G:
- return VNET_DPDK_PORT_TYPE_ETH_10G;
- case ETH_SPEED_NUM_20G:
- return VNET_DPDK_PORT_TYPE_ETH_20G;
- case ETH_SPEED_NUM_25G:
- return VNET_DPDK_PORT_TYPE_ETH_25G;
- case ETH_SPEED_NUM_40G:
- return VNET_DPDK_PORT_TYPE_ETH_40G;
- case ETH_SPEED_NUM_50G:
- return VNET_DPDK_PORT_TYPE_ETH_50G;
- case ETH_SPEED_NUM_56G:
- return VNET_DPDK_PORT_TYPE_ETH_56G;
- case ETH_SPEED_NUM_100G:
- return VNET_DPDK_PORT_TYPE_ETH_100G;
- default:
- return VNET_DPDK_PORT_TYPE_UNKNOWN;
- }
-}
-
static u32
dpdk_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi, u32 flags)
{
{
case ETHERNET_INTERFACE_FLAG_DEFAULT_L3:
/* set to L3/non-promisc mode */
- xd->flags &= ~DPDK_DEVICE_FLAG_PROMISC;
+ dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_PROMISC, 0);
break;
case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL:
- xd->flags |= DPDK_DEVICE_FLAG_PROMISC;
+ dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_PROMISC, 1);
break;
case ETHERNET_INTERFACE_FLAG_MTU:
- xd->port_conf.rxmode.max_rx_pkt_len = hi->max_packet_bytes;
- dpdk_device_setup (xd);
+ if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
+ rte_eth_dev_stop (xd->port_id);
+ rte_eth_dev_set_mtu (xd->port_id, hi->max_packet_bytes);
+ if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
+ rte_eth_dev_start (xd->port_id);
+ dpdk_log_debug ("[%u] mtu changed to %u", xd->port_id,
+ hi->max_packet_bytes);
return 0;
default:
return ~0;
return old;
}
-static int
-dpdk_port_crc_strip_enabled (dpdk_device_t * xd)
-{
- return !(xd->port_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC);
-}
-
/* The function check_l3cache helps check if Level 3 cache exists or not on current CPUs
return value 1: exist.
return value 0: not exist.
return 0;
}
-static void
-dpdk_enable_l4_csum_offload (dpdk_device_t * xd)
-{
- xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
- xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
- xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD |
- DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM;
-}
-
static clib_error_t *
dpdk_lib_init (dpdk_main_t * dm)
{
+ vnet_main_t *vnm = vnet_get_main ();
u32 nports;
- u32 mtu, max_rx_frame;
- int i;
- clib_error_t *error;
+ u16 port_id;
vlib_main_t *vm = vlib_get_main ();
vlib_thread_main_t *tm = vlib_get_thread_main ();
vnet_device_main_t *vdm = &vnet_device_main;
dpdk_device_t *xd;
vlib_pci_addr_t last_pci_addr;
u32 last_pci_addr_port = 0;
- u8 af_packet_instance_num = 0;
last_pci_addr.as_u32 = ~0;
nports = rte_eth_dev_count_avail ();
if (CLIB_DEBUG > 0)
dpdk_log_notice ("DPDK drivers found %d ports...", nports);
- if (dm->conf->enable_tcp_udp_checksum)
- dm->buffer_flags_template &= ~(VNET_BUFFER_F_L4_CHECKSUM_CORRECT
- | VNET_BUFFER_F_L4_CHECKSUM_COMPUTED);
-
/* vlib_buffer_t template */
vec_validate_aligned (dm->per_thread_data, tm->n_vlib_mains - 1,
CLIB_CACHE_LINE_BYTES);
- for (i = 0; i < tm->n_vlib_mains; i++)
+ for (int i = 0; i < tm->n_vlib_mains; i++)
{
dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data, i);
clib_memset (&ptd->buffer_template, 0, sizeof (vlib_buffer_t));
- ptd->buffer_template.flags = dm->buffer_flags_template;
vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_TX] = (u32) ~ 0;
}
- /* *INDENT-OFF* */
- RTE_ETH_FOREACH_DEV(i)
+ /* device config defaults */
+ dm->default_port_conf.n_rx_desc = DPDK_NB_RX_DESC_DEFAULT;
+ dm->default_port_conf.n_tx_desc = DPDK_NB_TX_DESC_DEFAULT;
+ dm->default_port_conf.n_rx_queues = 1;
+ dm->default_port_conf.n_tx_queues = tm->n_vlib_mains;
+ dm->default_port_conf.rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP;
+ dm->default_port_conf.max_lro_pkt_size = DPDK_MAX_LRO_SIZE_DEFAULT;
+
+ if ((clib_mem_get_default_hugepage_size () == 2 << 20) &&
+ check_l3cache () == 0)
+ dm->default_port_conf.n_rx_desc = dm->default_port_conf.n_tx_desc = 512;
+
+ RTE_ETH_FOREACH_DEV (port_id)
{
u8 addr[6];
- int vlan_off;
- struct rte_eth_dev_info dev_info;
+ struct rte_eth_dev_info di;
struct rte_pci_device *pci_dev;
+ struct rte_vmbus_device *vmbus_dev;
dpdk_portid_t next_port_id;
dpdk_device_config_t *devconf = 0;
+ vnet_eth_interface_registration_t eir = {};
vlib_pci_addr_t pci_addr;
+ vlib_vmbus_addr_t vmbus_addr;
uword *p = 0;
- if (!rte_eth_dev_is_valid_port(i))
+ if (!rte_eth_dev_is_valid_port (port_id))
continue;
- rte_eth_dev_info_get (i, &dev_info);
+ rte_eth_dev_info_get (port_id, &di);
- if (dev_info.device == 0)
+ if (di.device == 0)
{
dpdk_log_notice ("DPDK bug: missing device info. Skipping %s device",
- dev_info.driver_name);
+ di.driver_name);
continue;
}
- pci_dev = dpdk_get_pci_device (&dev_info);
+ pci_dev = dpdk_get_pci_device (&di);
if (pci_dev)
{
pci_addr.as_u32);
}
+ vmbus_dev = dpdk_get_vmbus_device (&di);
- /* Create vnet interface */
- vec_add2_aligned (dm->devices, xd, 1, CLIB_CACHE_LINE_BYTES);
- xd->nb_rx_desc = DPDK_NB_RX_DESC_DEFAULT;
- xd->nb_tx_desc = DPDK_NB_TX_DESC_DEFAULT;
- xd->cpu_socket = (i8) rte_eth_dev_socket_id (i);
+ if (vmbus_dev)
+ {
+ unformat_input_t input_vmbus;
+ unformat_init_string (&input_vmbus, di.device->name,
+ strlen (di.device->name));
+ if (unformat (&input_vmbus, "%U", unformat_vlib_vmbus_addr,
+ &vmbus_addr))
+ {
+ p = mhash_get (&dm->conf->device_config_index_by_vmbus_addr,
+ &vmbus_addr);
+ }
+ unformat_free (&input_vmbus);
+ }
if (p)
{
devconf = pool_elt_at_index (dm->conf->dev_confs, p[0]);
- xd->name = devconf->name;
+ /* If device is blacklisted, we should skip it */
+ if (devconf->is_blacklisted)
+ {
+ continue;
+ }
}
else
devconf = &dm->conf->default_devconf;
+ /* Create vnet interface */
+ vec_add2_aligned (dm->devices, xd, 1, CLIB_CACHE_LINE_BYTES);
+ xd->cpu_socket = (i8) rte_eth_dev_socket_id (port_id);
+ clib_memcpy (&xd->conf, &dm->default_port_conf,
+ sizeof (dpdk_port_conf_t));
+
+ if (p)
+ {
+ xd->name = devconf->name;
+ }
+
/* Handle representor devices that share the same PCI ID */
- if (dev_info.switch_info.domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
- {
- if (dev_info.switch_info.port_id != (uint16_t)-1)
- xd->interface_name_suffix = format (0, "%d", dev_info.switch_info.port_id);
- }
- /* Handle interface naming for devices with multiple ports sharing same PCI ID */
- else if (pci_dev &&
- ((next_port_id = rte_eth_find_next (i + 1)) != RTE_MAX_ETHPORTS))
+ if (di.switch_info.domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
{
- struct rte_eth_dev_info di = { 0 };
+ if (di.switch_info.port_id != (uint16_t) -1)
+ xd->interface_name_suffix =
+ format (0, "%d", di.switch_info.port_id);
+ }
+ /* Handle interface naming for devices with multiple ports sharing same
+ * PCI ID */
+ else if (pci_dev && ((next_port_id = rte_eth_find_next (port_id + 1)) !=
+ RTE_MAX_ETHPORTS))
+ {
+ struct rte_eth_dev_info next_di = { 0 };
struct rte_pci_device *next_pci_dev;
- rte_eth_dev_info_get (next_port_id, &di);
- next_pci_dev = di.device ? RTE_DEV_TO_PCI (di.device) : 0;
- if (next_pci_dev &&
- pci_addr.as_u32 != last_pci_addr.as_u32 &&
+ rte_eth_dev_info_get (next_port_id, &next_di);
+ next_pci_dev = next_di.device ? RTE_DEV_TO_PCI (next_di.device) : 0;
+ if (next_pci_dev && pci_addr.as_u32 != last_pci_addr.as_u32 &&
memcmp (&pci_dev->addr, &next_pci_dev->addr,
sizeof (struct rte_pci_addr)) == 0)
{
xd->interface_name_suffix = format (0, "0");
last_pci_addr.as_u32 = pci_addr.as_u32;
- last_pci_addr_port = i;
+ last_pci_addr_port = port_id;
}
else if (pci_addr.as_u32 == last_pci_addr.as_u32)
{
xd->interface_name_suffix =
- format (0, "%u", i - last_pci_addr_port);
+ format (0, "%u", port_id - last_pci_addr_port);
}
else
{
else
last_pci_addr.as_u32 = ~0;
- clib_memcpy (&xd->tx_conf, &dev_info.default_txconf,
- sizeof (struct rte_eth_txconf));
+ if (devconf->max_lro_pkt_size)
+ xd->conf.max_lro_pkt_size = devconf->max_lro_pkt_size;
- if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM)
- {
- xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM;
- xd->flags |= DPDK_DEVICE_FLAG_RX_IP4_CKSUM;
- }
+ xd->conf.n_tx_queues = clib_min (di.max_tx_queues, xd->conf.n_tx_queues);
- if (dm->conf->enable_tcp_udp_checksum)
- {
- if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM)
- xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_UDP_CKSUM;
- if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM)
- xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_CKSUM;
- }
+ if (devconf->num_tx_queues > 0 &&
+ devconf->num_tx_queues < xd->conf.n_tx_queues)
+ xd->conf.n_tx_queues = devconf->num_tx_queues;
- if (dm->conf->no_multi_seg)
- {
- xd->port_conf.txmode.offloads &= ~DEV_TX_OFFLOAD_MULTI_SEGS;
- xd->port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- xd->port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_SCATTER;
- }
- else
+ if (devconf->num_rx_queues > 1 &&
+ di.max_rx_queues >= devconf->num_rx_queues)
{
- xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
- xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
- xd->flags |= DPDK_DEVICE_FLAG_MAYBE_MULTISEG;
- }
-
- xd->tx_q_used = clib_min (dev_info.max_tx_queues, tm->n_vlib_mains);
-
- if (devconf->num_tx_queues > 0
- && devconf->num_tx_queues < xd->tx_q_used)
- xd->tx_q_used = clib_min (xd->tx_q_used, devconf->num_tx_queues);
-
- if (devconf->num_rx_queues > 1
- && dev_info.max_rx_queues >= devconf->num_rx_queues)
- {
- xd->rx_q_used = devconf->num_rx_queues;
- xd->port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
- if (devconf->rss_fn == 0)
- xd->port_conf.rx_adv_conf.rss_conf.rss_hf =
- ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP;
- else
+ xd->conf.n_rx_queues = devconf->num_rx_queues;
+ if (devconf->rss_fn)
{
u64 unsupported_bits;
- xd->port_conf.rx_adv_conf.rss_conf.rss_hf = devconf->rss_fn;
- unsupported_bits = xd->port_conf.rx_adv_conf.rss_conf.rss_hf;
- unsupported_bits &= ~dev_info.flow_type_rss_offloads;
+ xd->conf.rss_hf = devconf->rss_fn;
+ unsupported_bits = xd->conf.rss_hf;
+ unsupported_bits &= ~di.flow_type_rss_offloads;
if (unsupported_bits)
dpdk_log_warn ("Unsupported RSS hash functions: %U",
format_dpdk_rss_hf_name, unsupported_bits);
}
- xd->port_conf.rx_adv_conf.rss_conf.rss_hf &=
- dev_info.flow_type_rss_offloads;
+ xd->conf.rss_hf &= di.flow_type_rss_offloads;
}
- else
- xd->rx_q_used = 1;
- xd->flags |= DPDK_DEVICE_FLAG_PMD;
+ if (devconf->num_rx_desc)
+ xd->conf.n_rx_desc = devconf->num_rx_desc;
+
+ if (devconf->num_tx_desc)
+ xd->conf.n_tx_desc = devconf->num_tx_desc;
+
+ vec_validate_aligned (xd->rx_queues, xd->conf.n_rx_queues - 1,
+ CLIB_CACHE_LINE_BYTES);
/* workaround for drivers not setting driver_name */
- if ((!dev_info.driver_name) && (pci_dev))
- dev_info.driver_name = pci_dev->driver->driver.name;
+ if ((!di.driver_name) && (pci_dev))
+ di.driver_name = pci_dev->driver->driver.name;
- ASSERT (dev_info.driver_name);
+ ASSERT (di.driver_name);
if (!xd->pmd)
{
-
-#define _(s,f) else if (dev_info.driver_name && \
- !strcmp(dev_info.driver_name, s)) \
- xd->pmd = VNET_DPDK_PMD_##f;
+#define _(s, f) \
+ else if (di.driver_name && !strcmp (di.driver_name, s)) xd->pmd = \
+ VNET_DPDK_PMD_##f;
if (0)
;
foreach_dpdk_pmd
#undef _
- else
- xd->pmd = VNET_DPDK_PMD_UNKNOWN;
+ else xd->pmd = VNET_DPDK_PMD_UNKNOWN;
xd->port_type = VNET_DPDK_PORT_TYPE_UNKNOWN;
- xd->nb_rx_desc = DPDK_NB_RX_DESC_DEFAULT;
- xd->nb_tx_desc = DPDK_NB_TX_DESC_DEFAULT;
switch (xd->pmd)
{
/* Drivers with valid speed_capa set */
+ case VNET_DPDK_PMD_I40E:
+ dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_INT_UNMASKABLE, 1);
+ /* fall through */
case VNET_DPDK_PMD_E1000EM:
case VNET_DPDK_PMD_IGB:
+ case VNET_DPDK_PMD_IGC:
case VNET_DPDK_PMD_IXGBE:
- case VNET_DPDK_PMD_I40E:
case VNET_DPDK_PMD_ICE:
- xd->port_type = port_type_from_speed_capa (&dev_info);
- xd->supported_flow_actions = VNET_FLOW_ACTION_MARK |
- VNET_FLOW_ACTION_REDIRECT_TO_NODE |
+ xd->supported_flow_actions =
+ VNET_FLOW_ACTION_MARK | VNET_FLOW_ACTION_REDIRECT_TO_NODE |
VNET_FLOW_ACTION_REDIRECT_TO_QUEUE |
- VNET_FLOW_ACTION_BUFFER_ADVANCE |
- VNET_FLOW_ACTION_COUNT | VNET_FLOW_ACTION_DROP |
- VNET_FLOW_ACTION_RSS;
-
- if (dm->conf->no_tx_checksum_offload == 0)
- {
- xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
- xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
- xd->flags |=
- DPDK_DEVICE_FLAG_TX_OFFLOAD |
- DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM;
- }
-
-
- break;
+ VNET_FLOW_ACTION_BUFFER_ADVANCE | VNET_FLOW_ACTION_COUNT |
+ VNET_FLOW_ACTION_DROP | VNET_FLOW_ACTION_RSS;
+ dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM, 1);
+ xd->conf.enable_rxq_int = 1;
+ /* fall through */
+ case VNET_DPDK_PMD_MLX5:
case VNET_DPDK_PMD_CXGBE:
case VNET_DPDK_PMD_MLX4:
- case VNET_DPDK_PMD_MLX5:
case VNET_DPDK_PMD_QEDE:
case VNET_DPDK_PMD_BNXT:
- xd->port_type = port_type_from_speed_capa (&dev_info);
+ case VNET_DPDK_PMD_ENIC:
+ xd->port_type = port_type_from_speed_capa (&di);
break;
/* SR-IOV VFs */
+ case VNET_DPDK_PMD_I40EVF:
+ dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_INT_UNMASKABLE, 1);
+ /* fall through */
case VNET_DPDK_PMD_IGBVF:
case VNET_DPDK_PMD_IXGBEVF:
- case VNET_DPDK_PMD_I40EVF:
xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
- if (dm->conf->no_tx_checksum_offload == 0)
- {
- xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
- xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
- xd->flags |=
- DPDK_DEVICE_FLAG_TX_OFFLOAD |
- DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM;
- }
+ dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM, 1);
+ /* DPDK bug in multiqueue... */
+ /* xd->port_conf.intr_conf.rxq = 1; */
break;
/* iAVF */
case VNET_DPDK_PMD_IAVF:
- xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
- xd->supported_flow_actions = VNET_FLOW_ACTION_MARK |
- VNET_FLOW_ACTION_REDIRECT_TO_NODE |
+ dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_INT_UNMASKABLE, 1);
+ dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM, 1);
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
+ xd->supported_flow_actions =
+ VNET_FLOW_ACTION_MARK | VNET_FLOW_ACTION_REDIRECT_TO_NODE |
VNET_FLOW_ACTION_REDIRECT_TO_QUEUE |
- VNET_FLOW_ACTION_BUFFER_ADVANCE |
- VNET_FLOW_ACTION_COUNT | VNET_FLOW_ACTION_DROP;
-
- if (dm->conf->no_tx_checksum_offload == 0)
- {
- xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
- xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
- xd->flags |=
- DPDK_DEVICE_FLAG_TX_OFFLOAD |
- DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM;
- }
- break;
+ VNET_FLOW_ACTION_BUFFER_ADVANCE | VNET_FLOW_ACTION_COUNT |
+ VNET_FLOW_ACTION_DROP | VNET_FLOW_ACTION_RSS;
+ /* DPDK bug in multiqueue... */
+ /* xd->port_conf.intr_conf.rxq = 1; */
+ break;
case VNET_DPDK_PMD_THUNDERX:
xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
-
- if (dm->conf->no_tx_checksum_offload == 0)
- {
- xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
- xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
- xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD;
- }
break;
case VNET_DPDK_PMD_ENA:
xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
- xd->port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_SCATTER;
+ xd->conf.disable_rx_scatter = 1;
+ xd->conf.enable_rxq_int = 1;
break;
case VNET_DPDK_PMD_DPAA2:
xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G;
break;
- /* Cisco VIC */
- case VNET_DPDK_PMD_ENIC:
- {
- struct rte_eth_link l;
- rte_eth_link_get_nowait (i, &l);
- xd->port_type = port_type_from_link_speed (l.link_speed);
- if (dm->conf->enable_tcp_udp_checksum)
- dpdk_enable_l4_csum_offload (xd);
- }
- break;
-
/* Intel Red Rock Canyon */
case VNET_DPDK_PMD_FM10K:
xd->port_type = VNET_DPDK_PORT_TYPE_ETH_SWITCH;
/* virtio */
case VNET_DPDK_PMD_VIRTIO:
- xd->port_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+ xd->conf.disable_rss = 1;
xd->port_type = VNET_DPDK_PORT_TYPE_ETH_1G;
- xd->nb_rx_desc = DPDK_NB_RX_DESC_VIRTIO;
- xd->nb_tx_desc = DPDK_NB_TX_DESC_VIRTIO;
+ xd->conf.n_rx_desc = DPDK_NB_RX_DESC_VIRTIO;
+ xd->conf.n_tx_desc = DPDK_NB_TX_DESC_VIRTIO;
+ xd->conf.enable_rxq_int = 1;
break;
/* vmxnet3 */
case VNET_DPDK_PMD_VMXNET3:
xd->port_type = VNET_DPDK_PORT_TYPE_ETH_1G;
- xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
break;
case VNET_DPDK_PMD_AF_PACKET:
xd->port_type = VNET_DPDK_PORT_TYPE_AF_PACKET;
- xd->af_packet_instance_num = af_packet_instance_num++;
break;
case VNET_DPDK_PMD_VIRTIO_USER:
case VNET_DPDK_PMD_FAILSAFE:
xd->port_type = VNET_DPDK_PORT_TYPE_FAILSAFE;
- xd->port_conf.intr_conf.lsc = 1;
+ xd->conf.enable_lsc_int = 1;
break;
case VNET_DPDK_PMD_NETVSC:
- {
- struct rte_eth_link l;
- rte_eth_link_get_nowait (i, &l);
- xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
- }
+ {
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
+ }
break;
default:
xd->port_type = VNET_DPDK_PORT_TYPE_UNKNOWN;
}
-
- if (devconf->num_rx_desc)
- xd->nb_rx_desc = devconf->num_rx_desc;
- else {
-
- /* If num_rx_desc is not specified by VPP user, the current CPU is working
- with 2M page and has no L3 cache, default num_rx_desc is changed to 512
- from original 1024 to help reduce TLB misses.
- */
- if ((clib_mem_get_default_hugepage_size () == 2 << 20)
- && check_l3cache() == 0)
- xd->nb_rx_desc = 512;
- }
-
- if (devconf->num_tx_desc)
- xd->nb_tx_desc = devconf->num_tx_desc;
- else {
-
- /* If num_tx_desc is not specified by VPP user, the current CPU is working
- with 2M page and has no L3 cache, default num_tx_desc is changed to 512
- from original 1024 to help reduce TLB misses.
- */
- if ((clib_mem_get_default_hugepage_size () == 2 << 20)
- && check_l3cache() == 0)
- xd->nb_tx_desc = 512;
- }
- }
+ }
if (xd->pmd == VNET_DPDK_PMD_AF_PACKET)
{
addr[1] = 0xfe;
}
else
- rte_eth_macaddr_get (i, (void *) addr);
+ rte_eth_macaddr_get (port_id, (void *) addr);
- xd->port_id = i;
+ xd->port_id = port_id;
xd->device_index = xd - dm->devices;
xd->per_interface_next_index = ~0;
/* assign interface to input thread */
int q;
- error = ethernet_register_interface
- (dm->vnet_main, dpdk_device_class.index, xd->device_index,
- /* ethernet address */ addr,
- &xd->hw_if_index, dpdk_flag_change);
- if (error)
- return error;
+ eir.dev_class_index = dpdk_device_class.index;
+ eir.dev_instance = xd->device_index;
+ eir.address = addr;
+ eir.cb.flag_change = dpdk_flag_change;
+ xd->hw_if_index = vnet_eth_register_interface (vnm, &eir);
- /*
- * Ensure default mtu is not > the mtu read from the hardware.
- * Otherwise rte_eth_dev_configure() will fail and the port will
- * not be available.
- * Calculate max_frame_size and mtu supported by NIC
- */
- if (ETHERNET_MAX_PACKET_BYTES > dev_info.max_rx_pktlen)
- {
- /*
- * This device does not support the platforms's max frame
- * size. Use it's advertised mru instead.
- */
- max_rx_frame = dev_info.max_rx_pktlen;
- mtu = dev_info.max_rx_pktlen - sizeof (ethernet_header_t);
- }
- else
- {
- /* VPP treats MTU and max_rx_pktlen both equal to
- * ETHERNET_MAX_PACKET_BYTES, if dev_info.max_rx_pktlen >=
- * ETHERNET_MAX_PACKET_BYTES + sizeof(ethernet_header_t)
- */
- if (dev_info.max_rx_pktlen >= (ETHERNET_MAX_PACKET_BYTES +
- sizeof (ethernet_header_t)))
- {
- mtu = ETHERNET_MAX_PACKET_BYTES;
- max_rx_frame = ETHERNET_MAX_PACKET_BYTES;
-
- /*
- * Some platforms do not account for Ethernet FCS (4 bytes) in
- * MTU calculations. To interop with them increase mru but only
- * if the device's settings can support it.
- */
- if (dpdk_port_crc_strip_enabled (xd) &&
- (dev_info.max_rx_pktlen >= (ETHERNET_MAX_PACKET_BYTES +
- sizeof (ethernet_header_t) +
- 4)))
- {
- max_rx_frame += 4;
- }
- }
- else
- {
- max_rx_frame = ETHERNET_MAX_PACKET_BYTES;
- mtu = ETHERNET_MAX_PACKET_BYTES - sizeof (ethernet_header_t);
-
- if (dpdk_port_crc_strip_enabled (xd) &&
- (dev_info.max_rx_pktlen >= (ETHERNET_MAX_PACKET_BYTES + 4)))
- {
- max_rx_frame += 4;
- }
- }
- }
-
- if (xd->pmd == VNET_DPDK_PMD_FAILSAFE)
- {
- /* failsafe device numerables are reported with active device only,
- * need to query the mtu for current device setup to overwrite
- * reported value.
- */
- uint16_t dev_mtu;
- if (!rte_eth_dev_get_mtu (i, &dev_mtu))
- {
- mtu = dev_mtu;
- max_rx_frame = mtu + sizeof (ethernet_header_t);
-
- if (dpdk_port_crc_strip_enabled (xd))
- {
- max_rx_frame += 4;
- }
- }
- }
-
- /*Set port rxmode config */
- xd->port_conf.rxmode.max_rx_pkt_len = max_rx_frame;
-
- sw = vnet_get_hw_sw_interface (dm->vnet_main, xd->hw_if_index);
+ sw = vnet_get_hw_sw_interface (vnm, xd->hw_if_index);
xd->sw_if_index = sw->sw_if_index;
- vnet_hw_interface_set_input_node (dm->vnet_main, xd->hw_if_index,
- dpdk_input_node.index);
+ vnet_hw_if_set_input_node (vnm, xd->hw_if_index, dpdk_input_node.index);
if (devconf->workers)
{
- int i;
+ int j;
q = 0;
- clib_bitmap_foreach (i, devconf->workers, ({
- vnet_hw_interface_assign_rx_thread (dm->vnet_main, xd->hw_if_index, q++,
- vdm->first_worker_thread_index + i);
- }));
+ clib_bitmap_foreach (j, devconf->workers)
+ {
+ dpdk_rx_queue_t *rxq = vec_elt_at_index (xd->rx_queues, q);
+ rxq->queue_index = vnet_hw_if_register_rx_queue (
+ vnm, xd->hw_if_index, q++, vdm->first_worker_thread_index + j);
+ }
}
else
- for (q = 0; q < xd->rx_q_used; q++)
+ for (q = 0; q < xd->conf.n_rx_queues; q++)
{
- vnet_hw_interface_assign_rx_thread (dm->vnet_main, xd->hw_if_index, q, /* any */
- ~1);
+ dpdk_rx_queue_t *rxq = vec_elt_at_index (xd->rx_queues, q);
+ rxq->queue_index = vnet_hw_if_register_rx_queue (
+ vnm, xd->hw_if_index, q, VNET_HW_IF_RXQ_THREAD_ANY);
}
+
/*Get vnet hardware interface */
- hi = vnet_get_hw_interface (dm->vnet_main, xd->hw_if_index);
+ hi = vnet_get_hw_interface (vnm, xd->hw_if_index);
- /*Override default max_packet_bytes and max_supported_bytes set in
- * ethernet_register_interface() above*/
if (hi)
{
- hi->max_packet_bytes = mtu;
- hi->max_supported_packet_bytes = max_rx_frame;
hi->numa_node = xd->cpu_socket;
/* Indicate ability to support L3 DMAC filtering and
* initialize interface to L3 non-promisc mode */
- hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_MAC_FILTER;
- ethernet_set_flags (dm->vnet_main, xd->hw_if_index,
- ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
+ ethernet_set_flags (vnm, xd->hw_if_index,
+ ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
}
- if (dm->conf->no_tx_checksum_offload == 0)
- if (xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD && hi != NULL)
- hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD;
-
- if (devconf->tso == DPDK_DEVICE_TSO_ON && hi != NULL)
+ if (devconf->tso == DPDK_DEVICE_TSO_ON)
{
/*tcp_udp checksum must be enabled*/
- if ((dm->conf->enable_tcp_udp_checksum) &&
- (hi->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD))
- {
- hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO;
- xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_UDP_TSO;
- }
+ if (xd->conf.enable_tcp_udp_checksum == 0)
+ dpdk_log_warn ("[%u] TCP/UDP checksum offload must be enabled",
+ xd->port_id);
+ else if ((di.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0)
+ dpdk_log_warn ("[%u] TSO not supported by device", xd->port_id);
else
- clib_warning ("%s: TCP/UDP checksum offload must be enabled",
- hi->name);
+ xd->conf.enable_tso = 1;
}
dpdk_device_setup (xd);
/* rss queues should be configured after dpdk_device_setup() */
if ((hi != NULL) && (devconf->rss_queues != NULL))
- {
- if (vnet_hw_interface_set_rss_queues
- (vnet_get_main (), hi, devconf->rss_queues))
- {
- clib_warning ("%s: Failed to set rss queues", hi->name);
- }
- }
+ {
+ if (vnet_hw_interface_set_rss_queues (vnet_get_main (), hi,
+ devconf->rss_queues))
+ {
+ clib_warning ("%s: Failed to set rss queues", hi->name);
+ }
+ }
if (vec_len (xd->errors))
- dpdk_log_err ("setup failed for device %U. Errors:\n %U",
- format_dpdk_device_name, i,
- format_dpdk_device_errors, xd);
-
- /*
- * A note on Cisco VIC (PMD_ENIC) and VLAN:
- *
- * With Cisco VIC vNIC, every ingress packet is tagged. On a
- * trunk vNIC (C series "standalone" server), packets on no VLAN
- * are tagged with vlan 0. On an access vNIC (standalone or B
- * series "blade" server), packets on the default/native VLAN
- * are tagged with that vNIC's VLAN. VPP expects these packets
- * to be untagged, and previously enabled VLAN strip on VIC by
- * default. But it also broke vlan sub-interfaces.
- *
- * The VIC adapter has "untag default vlan" ingress VLAN rewrite
- * mode, which removes tags from these packets. VPP now includes
- * a local patch for the enic driver to use this untag mode, so
- * enabling vlan stripping is no longer needed. In future, the
- * driver + dpdk will have an API to set the mode after
- * rte_eal_init. Then, this note and local patch will be
- * removed.
- */
-
- /*
- * VLAN stripping: default to VLAN strip disabled, unless specified
- * otherwise in the startup config.
- */
-
- vlan_off = rte_eth_dev_get_vlan_offload (xd->port_id);
- if (devconf->vlan_strip_offload == DPDK_DEVICE_VLAN_STRIP_ON)
- {
- vlan_off |= ETH_VLAN_STRIP_OFFLOAD;
- if (rte_eth_dev_set_vlan_offload (xd->port_id, vlan_off) >= 0)
- dpdk_log_info ("VLAN strip enabled for interface\n");
- else
- dpdk_log_warn ("VLAN strip cannot be supported by interface\n");
- xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
- }
- else
- {
- if (vlan_off & ETH_VLAN_STRIP_OFFLOAD)
- {
- vlan_off &= ~ETH_VLAN_STRIP_OFFLOAD;
- if (rte_eth_dev_set_vlan_offload (xd->port_id, vlan_off) >= 0)
- dpdk_log_warn ("set VLAN offload failed\n");
- }
- xd->port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
- }
-
- if (hi)
- hi->max_packet_bytes = xd->port_conf.rxmode.max_rx_pkt_len
- - sizeof (ethernet_header_t);
- else
- dpdk_log_warn ("hi NULL");
-
- if (dm->conf->no_multi_seg)
- mtu = mtu > ETHER_MAX_LEN ? ETHER_MAX_LEN : mtu;
-
- rte_eth_dev_set_mtu (xd->port_id, mtu);
-}
+ dpdk_log_err ("setup failed for device %U. Errors:\n %U",
+ format_dpdk_device_name, port_id,
+ format_dpdk_device_errors, xd);
+ }
- /* *INDENT-ON* */
+ for (int i = 0; i < vec_len (dm->devices); i++)
+ vnet_hw_if_update_runtime_data (vnm, dm->devices[i].hw_if_index);
return 0;
}
if (!p)
{
- skipped:
- continue;
- }
+ skipped_pci:
+ continue;
+ }
devconf = pool_elt_at_index (conf->dev_confs, p[0]);
}
hash_set (conf->device_config_index_by_pci_addr, addr->as_u32,
devconf - conf->dev_confs);
devconf->pci_addr.as_u32 = addr->as_u32;
- devconf->is_blacklisted = 1;
- goto skipped;
- }
- else /* explicitly whitelisted, ignore the device blacklist */
- break;
- }
+ devconf->dev_addr_type = VNET_DEV_ADDR_PCI;
+ devconf->is_blacklisted = 1;
+ goto skipped_pci;
+ }
+ else /* explicitly whitelisted, ignore the device blacklist */
+ break;
+ }
}
/* virtio */
devconf - conf->dev_confs);
devconf->pci_addr.as_u32 = addr->as_u32;
}
+ devconf->dev_addr_type = VNET_DEV_ADDR_PCI;
devconf->is_blacklisted = 1;
clib_error_report (error);
}
{
clib_error_t *error;
vlib_vmbus_addr_t *addrs, *addr = 0;
+ int num_whitelisted = vec_len (conf->dev_confs);
+ int i;
addrs = vlib_vmbus_get_all_dev_addrs ();
/* *INDENT-OFF* */
vec_foreach (addr, addrs)
{
- error = vlib_vmbus_bind_to_uio (addr);
+ dpdk_device_config_t *devconf = 0;
+ if (num_whitelisted)
+ {
+ uword *p =
+ mhash_get (&conf->device_config_index_by_vmbus_addr, addr);
+ if (!p)
+ {
+ /* No devices blacklisted, but have whitelisted. blacklist all
+ * non-whitelisted */
+ pool_get (conf->dev_confs, devconf);
+ mhash_set (&conf->device_config_index_by_vmbus_addr, addr,
+ devconf - conf->dev_confs, 0);
+ devconf->vmbus_addr = *addr;
+ devconf->dev_addr_type = VNET_DEV_ADDR_VMBUS;
+ devconf->is_blacklisted = 1;
+ skipped_vmbus:
+ continue;
+ }
+
+ devconf = pool_elt_at_index (conf->dev_confs, p[0]);
+ }
+ /* Enforce Device blacklist by vmbus_addr */
+ for (i = 0; i < vec_len (conf->blacklist_by_vmbus_addr); i++)
+ {
+ vlib_vmbus_addr_t *a1 = &conf->blacklist_by_vmbus_addr[i];
+ vlib_vmbus_addr_t *a2 = addr;
+ if (memcmp (a1, a2, sizeof (vlib_vmbus_addr_t)) == 0)
+ {
+ if (devconf == 0)
+ {
+ /* Device not whitelisted */
+ pool_get (conf->dev_confs, devconf);
+ mhash_set (&conf->device_config_index_by_vmbus_addr, addr,
+ devconf - conf->dev_confs, 0);
+ devconf->vmbus_addr = *addr;
+ devconf->dev_addr_type = VNET_DEV_ADDR_VMBUS;
+ devconf->is_blacklisted = 1;
+ goto skipped_vmbus;
+ }
+ else
+ {
+ break;
+ }
+ }
+ }
+
+ error = vlib_vmbus_bind_to_uio (addr);
if (error)
{
+ if (devconf == 0)
+ {
+ pool_get (conf->dev_confs, devconf);
+ mhash_set (&conf->device_config_index_by_vmbus_addr, addr,
+ devconf - conf->dev_confs, 0);
+ devconf->vmbus_addr = *addr;
+ }
+ devconf->dev_addr_type = VNET_DEV_ADDR_VMBUS;
+ devconf->is_blacklisted = 1;
clib_error_report (error);
}
}
/* *INDENT-ON* */
}
+/* unformat callback for the "max-simd-bitwidth" startup-config option.
+ * Parses an unsigned value into *max_simd_bitwidth and accepts only
+ * DPDK_MAX_SIMD_BITWIDTH_256 or DPDK_MAX_SIMD_BITWIDTH_512; any other
+ * value, or a non-numeric token, fails the parse.
+ * Returns 1 on success, 0 on failure (unformat-function convention). */
+uword
+unformat_max_simd_bitwidth (unformat_input_t *input, va_list *va)
+{
+  uword *max_simd_bitwidth = va_arg (*va, uword *);
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      /* NOTE(review): "%u" parses an unsigned int; assumes the full
+       * uword is written on 64-bit targets — TODO confirm against
+       * vppinfra unformat semantics. */
+      if (!unformat (input, "%u", max_simd_bitwidth))
+	goto error;
+
+      /* Reject anything other than the two supported SIMD widths. */
+      if (*max_simd_bitwidth != DPDK_MAX_SIMD_BITWIDTH_256 &&
+	  *max_simd_bitwidth != DPDK_MAX_SIMD_BITWIDTH_512)
+	goto error;
+    }
+  return 1;
+error:
+  return 0;
+}
+
static clib_error_t *
-dpdk_device_config (dpdk_config_main_t * conf, vlib_pci_addr_t pci_addr,
- unformat_input_t * input, u8 is_default)
+dpdk_device_config (dpdk_config_main_t *conf, void *addr,
+ dpdk_device_addr_type_t addr_type, unformat_input_t *input,
+ u8 is_default)
{
clib_error_t *error = 0;
uword *p;
- dpdk_device_config_t *devconf;
+ dpdk_device_config_t *devconf = 0;
unformat_input_t sub_input;
if (is_default)
{
devconf = &conf->default_devconf;
}
- else
+ else if (addr_type == VNET_DEV_ADDR_PCI)
{
- p = hash_get (conf->device_config_index_by_pci_addr, pci_addr.as_u32);
+ p = hash_get (conf->device_config_index_by_pci_addr,
+ ((vlib_pci_addr_t *) (addr))->as_u32);
if (!p)
{
pool_get (conf->dev_confs, devconf);
- hash_set (conf->device_config_index_by_pci_addr, pci_addr.as_u32,
+ hash_set (conf->device_config_index_by_pci_addr,
+ ((vlib_pci_addr_t *) (addr))->as_u32,
devconf - conf->dev_confs);
}
else
return clib_error_return (0,
"duplicate configuration for PCI address %U",
- format_vlib_pci_addr, &pci_addr);
+ format_vlib_pci_addr, addr);
+ }
+ else if (addr_type == VNET_DEV_ADDR_VMBUS)
+ {
+ p = mhash_get (&conf->device_config_index_by_vmbus_addr,
+ (vlib_vmbus_addr_t *) (addr));
+
+ if (!p)
+ {
+ pool_get (conf->dev_confs, devconf);
+ mhash_set (&conf->device_config_index_by_vmbus_addr, addr,
+ devconf - conf->dev_confs, 0);
+ }
+ else
+ return clib_error_return (
+ 0, "duplicate configuration for VMBUS address %U",
+ format_vlib_vmbus_addr, addr);
}
- devconf->pci_addr.as_u32 = pci_addr.as_u32;
- devconf->tso = DPDK_DEVICE_TSO_DEFAULT;
+ if (addr_type == VNET_DEV_ADDR_PCI)
+ {
+ devconf->pci_addr.as_u32 = ((vlib_pci_addr_t *) (addr))->as_u32;
+ devconf->tso = DPDK_DEVICE_TSO_DEFAULT;
+ devconf->dev_addr_type = VNET_DEV_ADDR_PCI;
+ }
+ else if (addr_type == VNET_DEV_ADDR_VMBUS)
+ {
+ devconf->vmbus_addr = *((vlib_vmbus_addr_t *) (addr));
+ devconf->tso = DPDK_DEVICE_TSO_DEFAULT;
+ devconf->dev_addr_type = VNET_DEV_ADDR_VMBUS;
+ }
if (!input)
return 0;
if (error)
break;
}
- else if (unformat (input, "vlan-strip-offload off"))
- devconf->vlan_strip_offload = DPDK_DEVICE_VLAN_STRIP_OFF;
- else if (unformat (input, "vlan-strip-offload on"))
- devconf->vlan_strip_offload = DPDK_DEVICE_VLAN_STRIP_ON;
else if (unformat (input, "tso on"))
{
devconf->tso = DPDK_DEVICE_TSO_ON;
else if (unformat (input, "rss-queues %U",
unformat_bitmap_list, &devconf->rss_queues))
;
+ else if (unformat (input, "max-lro-pkt-size %u",
+ &devconf->max_lro_pkt_size))
+ ;
else
{
error = clib_error_return (0, "unknown input `%U'",
else if (devconf->workers &&
clib_bitmap_count_set_bits (devconf->workers) !=
devconf->num_rx_queues)
- error =
- clib_error_return (0,
- "%U: number of worker threads must be "
- "equal to number of rx queues", format_vlib_pci_addr,
- &pci_addr);
+ error = clib_error_return (0,
+ "%U: number of worker threads must be "
+ "equal to number of rx queues",
+ format_vlib_pci_addr, addr);
return error;
}
while (unformat_user (&input, unformat_line, &line))
{
- dpdk_log_notice ("%v", line);
+ int skip = 0;
+ vec_add1 (line, 0);
+
+      /* Unfortunately DPDK pollutes the log with these error messages
+       * even when we pass --in-memory, which means no secondary process */
+ if (strstr ((char *) line, "WARNING! Base virtual address hint"))
+ skip = 1;
+ else if (strstr ((char *) line, "This may cause issues with mapping "
+ "memory into secondary processes"))
+ skip = 1;
+ vec_pop (line);
+ if (!skip)
+ dpdk_log_notice ("%v", line);
vec_free (line);
}
static clib_error_t *
dpdk_config (vlib_main_t * vm, unformat_input_t * input)
{
+ dpdk_main_t *dm = &dpdk_main;
clib_error_t *error = 0;
dpdk_config_main_t *conf = &dpdk_config_main;
vlib_thread_main_t *tm = vlib_get_thread_main ();
dpdk_device_config_t *devconf;
- vlib_pci_addr_t pci_addr;
+ vlib_pci_addr_t pci_addr = { 0 };
+ vlib_vmbus_addr_t vmbus_addr = { 0 };
unformat_input_t sub_input;
uword default_hugepage_sz, x;
u8 *s, *tmp = 0;
int ret, i;
int num_whitelisted = 0;
+ int eal_no_hugetlb = 0;
u8 no_pci = 0;
u8 no_vmbus = 0;
u8 file_prefix = 0;
format (0, "%s/hugepages%c", vlib_unix_get_runtime_dir (), 0);
conf->device_config_index_by_pci_addr = hash_create (0, sizeof (uword));
+ mhash_init (&conf->device_config_index_by_vmbus_addr, sizeof (uword),
+ sizeof (vlib_vmbus_addr_t));
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "no-hugetlb"))
{
vec_add1 (conf->eal_init_args, (u8 *) "--no-huge");
+ eal_no_hugetlb = 1;
}
else if (unformat (input, "telemetry"))
conf->enable_telemetry = 1;
else if (unformat (input, "enable-tcp-udp-checksum"))
- conf->enable_tcp_udp_checksum = 1;
-
+ {
+ dm->default_port_conf.enable_tcp_udp_checksum = 1;
+ if (unformat (input, "enable-outer-checksum-offload"))
+ dm->default_port_conf.enable_outer_checksum_offload = 1;
+ }
else if (unformat (input, "no-tx-checksum-offload"))
- conf->no_tx_checksum_offload = 1;
+ dm->default_port_conf.disable_tx_checksum_offload = 1;
else if (unformat (input, "decimal-interface-names"))
conf->interface_name_format_decimal = 1;
else if (unformat (input, "no-multi-seg"))
- conf->no_multi_seg = 1;
-
+ dm->default_port_conf.disable_multi_seg = 1;
+ else if (unformat (input, "enable-lro"))
+ dm->default_port_conf.enable_lro = 1;
+ else if (unformat (input, "max-simd-bitwidth %U",
+ unformat_max_simd_bitwidth, &conf->max_simd_bitwidth))
+ ;
else if (unformat (input, "dev default %U", unformat_vlib_cli_sub_input,
&sub_input))
{
error =
- dpdk_device_config (conf, (vlib_pci_addr_t) (u32) ~ 1, &sub_input,
- 1);
+ dpdk_device_config (conf, 0, VNET_DEV_ADDR_ANY, &sub_input, 1);
if (error)
return error;
(input, "dev %U %U", unformat_vlib_pci_addr, &pci_addr,
unformat_vlib_cli_sub_input, &sub_input))
{
- error = dpdk_device_config (conf, pci_addr, &sub_input, 0);
+ error = dpdk_device_config (conf, &pci_addr, VNET_DEV_ADDR_PCI,
+ &sub_input, 0);
if (error)
return error;
}
else if (unformat (input, "dev %U", unformat_vlib_pci_addr, &pci_addr))
{
- error = dpdk_device_config (conf, pci_addr, 0, 0);
+ error =
+ dpdk_device_config (conf, &pci_addr, VNET_DEV_ADDR_PCI, 0, 0);
+
+ if (error)
+ return error;
+
+ num_whitelisted++;
+ }
+ else if (unformat (input, "dev %U %U", unformat_vlib_vmbus_addr,
+ &vmbus_addr, unformat_vlib_cli_sub_input, &sub_input))
+ {
+ error = dpdk_device_config (conf, &vmbus_addr, VNET_DEV_ADDR_VMBUS,
+ &sub_input, 0);
+
+ if (error)
+ return error;
+
+ num_whitelisted++;
+ }
+ else if (unformat (input, "dev %U", unformat_vlib_vmbus_addr,
+ &vmbus_addr))
+ {
+ error =
+ dpdk_device_config (conf, &vmbus_addr, VNET_DEV_ADDR_VMBUS, 0, 0);
if (error)
return error;
num_whitelisted++;
}
- else if (unformat (input, "num-mem-channels %d", &conf->nchannels))
- conf->nchannels_set_manually = 0;
- else if (unformat (input, "num-crypto-mbufs %d",
- &conf->num_crypto_mbufs))
- ;
else if (unformat (input, "uio-driver %s", &conf->uio_driver_name))
;
else if (unformat (input, "socket-mem %s", &socket_mem))
tmp = format (0, "--no-pci%c", 0);
vec_add1 (conf->eal_init_args, tmp);
}
+ else if (unformat (input, "blacklist %U", unformat_vlib_vmbus_addr,
+ &vmbus_addr))
+ {
+ vec_add1 (conf->blacklist_by_vmbus_addr, vmbus_addr);
+ }
else
if (unformat
(input, "blacklist %x:%x:%x.%x", &domain, &bus, &device, &func))
vec_add1 (conf->eal_init_args, s); \
}
foreach_eal_single_hyphen_arg
-#undef _
-#define _(a,b) \
- else if (unformat(input, #a " %s", &s)) \
- { \
- tmp = format (0, "-%s%c", #b, 0); \
- vec_add1 (conf->eal_init_args, tmp); \
- vec_add1 (s, 0); \
- vec_add1 (conf->eal_init_args, s); \
- conf->a##_set_manually = 1; \
- }
- foreach_eal_single_hyphen_mandatory_arg
#undef _
else if (unformat (input, "default"))
;
if (!conf->uio_driver_name)
conf->uio_driver_name = format (0, "auto%c", 0);
- default_hugepage_sz = clib_mem_get_default_hugepage_size ();
-
- /* *INDENT-OFF* */
- clib_bitmap_foreach (x, tm->cpu_socket_bitmap, (
+ if (eal_no_hugetlb == 0)
{
- clib_error_t *e;
- uword n_pages;
- /* preallocate at least 16MB of hugepages per socket,
- if more is needed it is up to consumer to preallocate more */
- n_pages = round_pow2 ((uword) 16 << 20, default_hugepage_sz);
- n_pages /= default_hugepage_sz;
-
- if ((e = clib_sysfs_prealloc_hugepages(x, 0, n_pages)))
- clib_error_report (e);
- }));
- /* *INDENT-ON* */
+ vec_add1 (conf->eal_init_args, (u8 *) "--in-memory");
+
+ default_hugepage_sz = clib_mem_get_default_hugepage_size ();
+
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (x, tm->cpu_socket_bitmap)
+ {
+ clib_error_t *e;
+ uword n_pages;
+ /* preallocate at least 16MB of hugepages per socket,
+ if more is needed it is up to consumer to preallocate more */
+ n_pages = round_pow2 ((uword) 16 << 20, default_hugepage_sz);
+ n_pages /= default_hugepage_sz;
+
+ if ((e = clib_sysfs_prealloc_hugepages(x, 0, n_pages)))
+ clib_error_report (e);
+ }
+ /* *INDENT-ON* */
+ }
/* on/off dpdk's telemetry thread */
if (conf->enable_telemetry == 0)
if (error)
return error;
- /* I'll bet that -c and -n must be the first and second args... */
- if (!conf->coremask_set_manually)
- {
- vlib_thread_registration_t *tr;
- uword *coremask = 0;
- int i;
-
- /* main thread core */
- coremask = clib_bitmap_set (coremask, tm->main_lcore, 1);
-
- for (i = 0; i < vec_len (tm->registrations); i++)
- {
- tr = tm->registrations[i];
- coremask = clib_bitmap_or (coremask, tr->coremask);
- }
-
- vec_insert (conf->eal_init_args, 2, 1);
- conf->eal_init_args[1] = (u8 *) "-c";
- tmp = format (0, "%U%c", format_bitmap_hex, coremask, 0);
- conf->eal_init_args[2] = tmp;
- clib_bitmap_free (coremask);
- }
-
- if (!conf->nchannels_set_manually)
- {
- vec_insert (conf->eal_init_args, 2, 3);
- conf->eal_init_args[3] = (u8 *) "-n";
- tmp = format (0, "%d", conf->nchannels);
- vec_terminate_c_string (tmp);
- conf->eal_init_args[4] = tmp;
- }
-
if (no_pci == 0 && geteuid () == 0)
dpdk_bind_devices_to_uio (conf);
if (devconf->x == 0 && conf->default_devconf.x > 0) \
devconf->x = conf->default_devconf.x ;
- /* *INDENT-OFF* */
- pool_foreach (devconf, conf->dev_confs, ({
+ pool_foreach (devconf, conf->dev_confs) {
/* default per-device config items */
foreach_dpdk_device_config_item
- /* copy vlan_strip config from default device */
- if (devconf->vlan_strip_offload == 0 &&
- conf->default_devconf.vlan_strip_offload > 0)
- devconf->vlan_strip_offload =
- conf->default_devconf.vlan_strip_offload;
+ /* copy tso config from default device */
+ _ (tso)
- /* copy tso config from default device */
- _(tso)
+      /* copy devargs config from default device */
+ _ (devargs)
- /* copy tso config from default device */
- _(devargs)
+ /* copy rss_queues config from default device */
+ _ (rss_queues)
- /* copy rss_queues config from default device */
- _(rss_queues)
-
- /* add DPDK EAL whitelist/blacklist entry */
- if (num_whitelisted > 0 && devconf->is_blacklisted == 0)
+ /* add DPDK EAL whitelist/blacklist entry */
+ if (num_whitelisted > 0 && devconf->is_blacklisted == 0 &&
+ devconf->dev_addr_type == VNET_DEV_ADDR_PCI)
{
- tmp = format (0, "-w%c", 0);
+ tmp = format (0, "-a%c", 0);
vec_add1 (conf->eal_init_args, tmp);
if (devconf->devargs)
{
- tmp = format (0, "%U,%s", format_vlib_pci_addr, &devconf->pci_addr, devconf->devargs, 0);
+ tmp = format (0, "%U,%s%c", format_vlib_pci_addr,
+ &devconf->pci_addr, devconf->devargs, 0);
}
else
{
}
vec_add1 (conf->eal_init_args, tmp);
}
- else if (num_whitelisted == 0 && devconf->is_blacklisted != 0)
+ else if (num_whitelisted == 0 && devconf->is_blacklisted != 0 &&
+ devconf->dev_addr_type == VNET_DEV_ADDR_PCI)
{
tmp = format (0, "-b%c", 0);
vec_add1 (conf->eal_init_args, tmp);
tmp = format (0, "%U%c", format_vlib_pci_addr, &devconf->pci_addr, 0);
vec_add1 (conf->eal_init_args, tmp);
}
- }));
- /* *INDENT-ON* */
+ }
#undef _
- /* set master-lcore */
- tmp = format (0, "--master-lcore%c", 0);
- vec_add1 (conf->eal_init_args, tmp);
- tmp = format (0, "%u%c", tm->main_lcore, 0);
- vec_add1 (conf->eal_init_args, tmp);
-
-
if (socket_mem)
clib_warning ("socket-mem argument is deprecated");
ret = rte_eal_init (vec_len (conf->eal_init_args),
(char **) conf->eal_init_args);
+ /* enable the AVX-512 vPMDs in DPDK */
+ if (clib_cpu_supports_avx512_bitalg () &&
+ conf->max_simd_bitwidth == DPDK_MAX_SIMD_BITWIDTH_DEFAULT)
+ rte_vect_set_max_simd_bitwidth (RTE_VECT_SIMD_512);
+ else if (conf->max_simd_bitwidth != DPDK_MAX_SIMD_BITWIDTH_DEFAULT)
+ rte_vect_set_max_simd_bitwidth (conf->max_simd_bitwidth ==
+ DPDK_MAX_SIMD_BITWIDTH_256 ?
+ RTE_VECT_SIMD_256 :
+ RTE_VECT_SIMD_512);
+
/* lazy umount hugepages */
umount2 ((char *) huge_dir_path, MNT_DETACH);
rmdir ((char *) huge_dir_path);
u32 hw_flags = 0;
u8 hw_flags_chg = 0;
- /* only update link state for PMD interfaces */
- if ((xd->flags & DPDK_DEVICE_FLAG_PMD) == 0)
- return;
-
xd->time_last_link_update = now ? now : xd->time_last_link_update;
clib_memset (&xd->link, 0, sizeof (xd->link));
rte_eth_link_get_nowait (xd->port_id, &xd->link);
if (LINK_STATE_ELOGS)
{
- vlib_main_t *vm = vlib_get_main ();
ELOG_TYPE_DECLARE (e) =
{
.format =
u8 old_link_state;
u8 new_link_state;
} *ed;
- ed = ELOG_DATA (&vm->elog_main, e);
+ ed = ELOG_DATA (&vlib_global_main.elog_main, e);
ed->sw_if_index = xd->sw_if_index;
ed->admin_up = (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) != 0;
ed->old_link_state = (u8)
{
if (LINK_STATE_ELOGS)
{
- vlib_main_t *vm = vlib_get_main ();
-
ELOG_TYPE_DECLARE (e) =
{
.format =
u32 sw_if_index;
u32 flags;
} *ed;
- ed = ELOG_DATA (&vm->elog_main, e);
+ ed = ELOG_DATA (&vlib_global_main.elog_main, e);
ed->sw_if_index = xd->sw_if_index;
ed->flags = hw_flags;
}
if (error)
clib_error_report (error);
- error = dpdk_cryptodev_init (vm);
- if (error)
+ if (dpdk_cryptodev_init)
{
- vlib_log_warn (dpdk_main.log_cryptodev, "%U", format_clib_error, error);
- clib_error_free (error);
+ error = dpdk_cryptodev_init (vm);
+ if (error)
+ {
+ vlib_log_warn (dpdk_main.log_cryptodev, "%U", format_clib_error,
+ error);
+ clib_error_free (error);
+ }
}
tm->worker_thread_release = 1;
"Data in cache line 0 is bigger than cache line size");
STATIC_ASSERT (offsetof (frame_queue_trace_t, cacheline0) == 0,
"Cache line marker must be 1st element in frame_queue_trace_t");
- STATIC_ASSERT (RTE_CACHE_LINE_SIZE == 1 << CLIB_LOG2_CACHE_LINE_BYTES,
- "DPDK RTE CACHE LINE SIZE does not match with 1<<CLIB_LOG2_CACHE_LINE_BYTES");
dpdk_cli_reference ();
- dm->vlib_main = vm;
- dm->vnet_main = vnet_get_main ();
dm->conf = &dpdk_config_main;
- dm->conf->nchannels = 4;
vec_add1 (dm->conf->eal_init_args, (u8 *) "vnet");
- vec_add1 (dm->conf->eal_init_args, (u8 *) "--in-memory");
-
- /* Default vlib_buffer_t flags, DISABLES tcp/udp checksumming... */
- dm->buffer_flags_template = (VLIB_BUFFER_TOTAL_LENGTH_VALID |
- VLIB_BUFFER_EXT_HDR_VALID |
- VNET_BUFFER_F_L4_CHECKSUM_COMPUTED |
- VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
dm->stat_poll_interval = DPDK_STATS_POLL_INTERVAL;
dm->link_state_poll_interval = DPDK_LINK_POLL_INTERVAL;
dm->log_default = vlib_log_register_class ("dpdk", 0);
dm->log_cryptodev = vlib_log_register_class ("dpdk", "cryptodev");
- dm->log_ipsec = vlib_log_register_class ("dpdk", "ipsec");
return error;
}
VLIB_INIT_FUNCTION (dpdk_init);
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
+/* Per-worker-thread init hook (runs via VLIB_WORKER_INIT_FUNCTION on each
+ * worker at startup): registers the calling thread with DPDK through
+ * rte_thread_register () so DPDK APIs may be used from non-EAL threads.
+ * Panics with the thread index and rte_errno string if registration fails;
+ * returns 0 (no error) otherwise. */
+static clib_error_t *
+dpdk_worker_thread_init (vlib_main_t *vm)
+{
+  if (rte_thread_register () < 0)
+    clib_panic ("dpdk: cannot register thread %u - %s", vm->thread_index,
+		rte_strerror (rte_errno));
+  return 0;
+}
+
+VLIB_WORKER_INIT_FUNCTION (dpdk_worker_thread_init);