#include <vnet/ethernet/ethernet.h>
#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
+#include <dpdk/cryptodev/cryptodev.h>
#include <vlib/pci/pci.h>
#include <vlib/vmbus/vmbus.h>
{
dpdk_main_t *dm = &dpdk_main;
dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);
- u32 old = 0;
+ u32 old = (xd->flags & DPDK_DEVICE_FLAG_PROMISC) != 0;
- if (ETHERNET_INTERFACE_FLAG_CONFIG_PROMISC (flags))
- {
- old = (xd->flags & DPDK_DEVICE_FLAG_PROMISC) != 0;
-
- if (flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
- xd->flags |= DPDK_DEVICE_FLAG_PROMISC;
- else
- xd->flags &= ~DPDK_DEVICE_FLAG_PROMISC;
-
- if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
- {
- if (xd->flags & DPDK_DEVICE_FLAG_PROMISC)
- rte_eth_promiscuous_enable (xd->port_id);
- else
- rte_eth_promiscuous_disable (xd->port_id);
- }
- }
- else if (ETHERNET_INTERFACE_FLAG_CONFIG_MTU (flags))
+ switch (flags)
{
+ case ETHERNET_INTERFACE_FLAG_DEFAULT_L3:
+ /* set to L3/non-promisc mode */
+ xd->flags &= ~DPDK_DEVICE_FLAG_PROMISC;
+ break;
+ case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL:
+ xd->flags |= DPDK_DEVICE_FLAG_PROMISC;
+ break;
+ case ETHERNET_INTERFACE_FLAG_MTU:
xd->port_conf.rxmode.max_rx_pkt_len = hi->max_packet_bytes;
dpdk_device_setup (xd);
+ return 0;
+ default:
+ return ~0;
}
- return old;
-}
-static void
-dpdk_device_lock_init (dpdk_device_t * xd)
-{
- int q;
- vec_validate (xd->lockp, xd->tx_q_used - 1);
- for (q = 0; q < xd->tx_q_used; q++)
+ if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
{
- xd->lockp[q] = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
- CLIB_CACHE_LINE_BYTES);
- clib_memset ((void *) xd->lockp[q], 0, CLIB_CACHE_LINE_BYTES);
+ if (xd->flags & DPDK_DEVICE_FLAG_PROMISC)
+ rte_eth_promiscuous_enable (xd->port_id);
+ else
+ rte_eth_promiscuous_disable (xd->port_id);
}
+
+ return old;
}
static int
return 0;
}
+/* Request TCP and UDP transmit checksum offload in the device's port
+ * configuration and record that on the device flags.
+ * NOTE(review): DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM presumably tells the
+ * TX path to pre-fill the L4 pseudo-header checksum as Intel NICs
+ * require — confirm against the dpdk device TX node. */
+static void
+dpdk_enable_l4_csum_offload (dpdk_device_t * xd)
+{
+  xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
+  xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
+  xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD |
+    DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM;
+}
+
static clib_error_t *
dpdk_lib_init (dpdk_main_t * dm)
{
int vlan_off;
struct rte_eth_dev_info dev_info;
struct rte_pci_device *pci_dev;
- struct rte_eth_link l;
dpdk_portid_t next_port_id;
dpdk_device_config_t *devconf = 0;
vlib_pci_addr_t pci_addr;
if (!rte_eth_dev_is_valid_port(i))
continue;
- rte_eth_link_get_nowait (i, &l);
rte_eth_dev_info_get (i, &dev_info);
if (dev_info.device == 0)
xd->flags |= DPDK_DEVICE_FLAG_RX_IP4_CKSUM;
}
+ if (dm->conf->enable_tcp_udp_checksum)
+ {
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM)
+ xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_UDP_CKSUM;
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM)
+ xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_CKSUM;
+ }
+
if (dm->conf->no_multi_seg)
{
xd->port_conf.txmode.offloads &= ~DEV_TX_OFFLOAD_MULTI_SEGS;
VNET_FLOW_ACTION_REDIRECT_TO_NODE |
VNET_FLOW_ACTION_REDIRECT_TO_QUEUE |
VNET_FLOW_ACTION_BUFFER_ADVANCE |
- VNET_FLOW_ACTION_COUNT | VNET_FLOW_ACTION_DROP;
+ VNET_FLOW_ACTION_COUNT | VNET_FLOW_ACTION_DROP |
+ VNET_FLOW_ACTION_RSS;
if (dm->conf->no_tx_checksum_offload == 0)
{
}
break;
+ /* iAVF */
+ case VNET_DPDK_PMD_IAVF:
+ xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
+ xd->supported_flow_actions = VNET_FLOW_ACTION_MARK |
+ VNET_FLOW_ACTION_REDIRECT_TO_NODE |
+ VNET_FLOW_ACTION_REDIRECT_TO_QUEUE |
+ VNET_FLOW_ACTION_BUFFER_ADVANCE |
+ VNET_FLOW_ACTION_COUNT | VNET_FLOW_ACTION_DROP;
+
+ if (dm->conf->no_tx_checksum_offload == 0)
+ {
+ xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
+ xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
+ xd->flags |=
+ DPDK_DEVICE_FLAG_TX_OFFLOAD |
+ DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM;
+ }
+ break;
+
case VNET_DPDK_PMD_THUNDERX:
xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
/* Cisco VIC */
case VNET_DPDK_PMD_ENIC:
- xd->port_type = port_type_from_link_speed (l.link_speed);
+ {
+ struct rte_eth_link l;
+ rte_eth_link_get_nowait (i, &l);
+ xd->port_type = port_type_from_link_speed (l.link_speed);
+ if (dm->conf->enable_tcp_udp_checksum)
+ dpdk_enable_l4_csum_offload (xd);
+ }
break;
/* Intel Red Rock Canyon */
/* virtio */
case VNET_DPDK_PMD_VIRTIO:
+ xd->port_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
xd->port_type = VNET_DPDK_PORT_TYPE_ETH_1G;
xd->nb_rx_desc = DPDK_NB_RX_DESC_VIRTIO;
xd->nb_tx_desc = DPDK_NB_TX_DESC_VIRTIO;
break;
case VNET_DPDK_PMD_NETVSC:
-	xd->port_type = port_type_from_link_speed (l.link_speed);
+	/* netvsc is classified as a VF port unconditionally, so do not
+	 * fetch link state here: the previous hunk declared a local
+	 * struct rte_eth_link and called rte_eth_link_get_nowait() whose
+	 * result was never read (dead code). */
+	xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
break;
default:
else
rte_eth_macaddr_get (i, (void *) addr);
- if (xd->tx_q_used < tm->n_vlib_mains)
- dpdk_device_lock_init (xd);
-
xd->port_id = i;
xd->device_index = xd - dm->devices;
xd->per_interface_next_index = ~0;
/* assign interface to input thread */
int q;
-
error = ethernet_register_interface
(dm->vnet_main, dpdk_device_class.index, xd->device_index,
/* ethernet address */ addr,
hi->max_packet_bytes = mtu;
hi->max_supported_packet_bytes = max_rx_frame;
hi->numa_node = xd->cpu_socket;
+
+ /* Indicate ability to support L3 DMAC filtering and
+ * initialize interface to L3 non-promisc mode */
+ hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_MAC_FILTER;
+ ethernet_set_flags (dm->vnet_main, xd->hw_if_index,
+ ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
}
if (dm->conf->no_tx_checksum_offload == 0)
dpdk_device_setup (xd);
+ /* rss queues should be configured after dpdk_device_setup() */
+ if (devconf->rss_queues != NULL)
+ {
+ if (vnet_hw_interface_set_rss_queues
+ (vnet_get_main (), hi, devconf->rss_queues))
+ {
+ clib_warning ("%s: Failed to set rss queues", hi->name);
+ }
+ }
+
if (vec_len (xd->errors))
- dpdk_log_err ("setup failed for device %U. Errors:\n %U",
- format_dpdk_device_name, i,
- format_dpdk_device_errors, xd);
+ dpdk_log_err ("setup failed for device %U. Errors:\n %U",
+ format_dpdk_device_name, i,
+ format_dpdk_device_errors, xd);
/*
* A note on Cisco VIC (PMD_ENIC) and VLAN:
* otherwise in the startup config.
*/
- vlan_off = rte_eth_dev_get_vlan_offload (xd->port_id);
- if (devconf->vlan_strip_offload == DPDK_DEVICE_VLAN_STRIP_ON)
- {
- vlan_off |= ETH_VLAN_STRIP_OFFLOAD;
- if (rte_eth_dev_set_vlan_offload (xd->port_id, vlan_off) >= 0)
- dpdk_log_info ("VLAN strip enabled for interface\n");
- else
- dpdk_log_warn ("VLAN strip cannot be supported by interface\n");
- xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
- }
- else
- {
- if (vlan_off & ETH_VLAN_STRIP_OFFLOAD)
- {
- vlan_off &= ~ETH_VLAN_STRIP_OFFLOAD;
- if (rte_eth_dev_set_vlan_offload (xd->port_id, vlan_off) >= 0)
- dpdk_log_warn ("set VLAN offload failed\n");
- }
- xd->port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
- }
+ vlan_off = rte_eth_dev_get_vlan_offload (xd->port_id);
+ if (devconf->vlan_strip_offload == DPDK_DEVICE_VLAN_STRIP_ON)
+ {
+ vlan_off |= ETH_VLAN_STRIP_OFFLOAD;
+ if (rte_eth_dev_set_vlan_offload (xd->port_id, vlan_off) >= 0)
+ dpdk_log_info ("VLAN strip enabled for interface\n");
+ else
+ dpdk_log_warn ("VLAN strip cannot be supported by interface\n");
+ xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+ else
+ {
+ if (vlan_off & ETH_VLAN_STRIP_OFFLOAD)
+ {
+ vlan_off &= ~ETH_VLAN_STRIP_OFFLOAD;
+ if (rte_eth_dev_set_vlan_offload (xd->port_id, vlan_off) >= 0)
+ dpdk_log_warn ("set VLAN offload failed\n");
+ }
+ xd->port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
- if (hi)
- hi->max_packet_bytes = xd->port_conf.rxmode.max_rx_pkt_len
- - sizeof (ethernet_header_t);
- else
- dpdk_log_warn ("hi NULL");
+ if (hi)
+ hi->max_packet_bytes = xd->port_conf.rxmode.max_rx_pkt_len
+ - sizeof (ethernet_header_t);
+ else
+ dpdk_log_warn ("hi NULL");
- if (dm->conf->no_multi_seg)
- mtu = mtu > ETHER_MAX_LEN ? ETHER_MAX_LEN : mtu;
+ if (dm->conf->no_multi_seg)
+ mtu = mtu > ETHER_MAX_LEN ? ETHER_MAX_LEN : mtu;
+
+ rte_eth_dev_set_mtu (xd->port_id, mtu);
+}
- rte_eth_dev_set_mtu (xd->port_id, mtu);
- }
/* *INDENT-ON* */
return 0;
/* Cavium FastlinQ QL41000 Series */
else if (d->vendor_id == 0x1077 && d->device_id >= 0x8070 && d->device_id <= 0x8090)
;
- /* Mellanox mlx4 */
+ /* Mellanox CX3, CX3VF */
else if (d->vendor_id == 0x15b3 && d->device_id >= 0x1003 && d->device_id <= 0x1004)
{
continue;
}
- /* Mellanox mlx5 */
+ /* Mellanox CX4, CX4VF, CX4LX, CX4LXVF, CX5, CX5VF, CX5EX, CX5EXVF */
else if (d->vendor_id == 0x15b3 && d->device_id >= 0x1013 && d->device_id <= 0x101a)
{
continue;
}
+ /* Mellanox CX6, CX6VF, CX6DX, CX6DXVF */
+ else if (d->vendor_id == 0x15b3 && d->device_id >= 0x101b && d->device_id <= 0x101e)
+ {
+ continue;
+ }
/* Broadcom NetXtreme S, and E series only */
else if (d->vendor_id == 0x14e4 &&
((d->device_id >= 0x16c0 &&
}
else if (unformat (input, "devargs %s", &devconf->devargs))
;
+ else if (unformat (input, "rss-queues %U",
+ unformat_bitmap_list, &devconf->rss_queues))
+ ;
else
{
error = clib_error_return (0, "unknown input `%U'",
u8 file_prefix = 0;
u8 *socket_mem = 0;
u8 *huge_dir_path = 0;
- u32 vendor, device;
+ u32 vendor, device, domain, bus, func;
huge_dir_path =
format (0, "%s/hugepages%c", vlib_unix_get_runtime_dir (), 0);
tmp = format (0, "--no-pci%c", 0);
vec_add1 (conf->eal_init_args, tmp);
}
+ else
+ if (unformat
+ (input, "blacklist %x:%x:%x.%x", &domain, &bus, &device, &func))
+ {
+ tmp =
+ format (0, "-b %04x:%02x:%02x.%x%c", domain, bus, device, func,
+ 0);
+ vec_add1 (conf->eal_init_args, tmp);
+ }
else if (unformat (input, "blacklist %x:%x", &vendor, &device))
{
u32 blacklist_entry;
/* copy tso config from default device */
_(devargs)
+ /* copy rss_queues config from default device */
+ _(rss_queues)
+
/* add DPDK EAL whitelist/blacklist entry */
if (num_whitelisted > 0 && devconf->is_blacklisted == 0)
{
error = dpdk_lib_init (dm);
+ if (error)
+ clib_error_report (error);
+
+ error = dpdk_cryptodev_init (vm);
if (error)
clib_error_report (error);