X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fdpdk%2Fdevice%2Finit.c;h=78374af4b0f18f5cce1043dc5c9ad6c6bdc71757;hb=19ff0c369;hp=61057776ebfa06fbf1ae0d71188df04042de6681;hpb=cb3b7052357b57ab927edf178baa1412d7df81d5;p=vpp.git diff --git a/src/plugins/dpdk/device/init.c b/src/plugins/dpdk/device/init.c index 61057776ebf..78374af4b0f 100644 --- a/src/plugins/dpdk/device/init.c +++ b/src/plugins/dpdk/device/init.c @@ -22,10 +22,15 @@ #include #include +#include +#include #include +#include #include +#include #include +#include #include #include @@ -34,6 +39,7 @@ #include #include #include +#include #include @@ -46,19 +52,6 @@ dpdk_config_main_t dpdk_config_main; /* Port configuration, mildly modified Intel app values */ -static struct rte_eth_conf port_conf_template = { - .rxmode = { - .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .hw_strip_crc = 0, /**< CRC stripped by hardware */ - }, - .txmode = { - .mq_mode = ETH_MQ_TX_NONE, - }, -}; - static dpdk_port_type_t port_type_from_speed_capa (struct rte_eth_dev_info *dev_info) { @@ -87,104 +80,125 @@ port_type_from_speed_capa (struct rte_eth_dev_info *dev_info) return VNET_DPDK_PORT_TYPE_UNKNOWN; } +static dpdk_port_type_t +port_type_from_link_speed (u32 link_speed) +{ + switch (link_speed) + { + case ETH_SPEED_NUM_1G: + return VNET_DPDK_PORT_TYPE_ETH_1G; + case ETH_SPEED_NUM_2_5G: + return VNET_DPDK_PORT_TYPE_ETH_2_5G; + case ETH_SPEED_NUM_5G: + return VNET_DPDK_PORT_TYPE_ETH_5G; + case ETH_SPEED_NUM_10G: + return VNET_DPDK_PORT_TYPE_ETH_10G; + case ETH_SPEED_NUM_20G: + return VNET_DPDK_PORT_TYPE_ETH_20G; + case ETH_SPEED_NUM_25G: + return VNET_DPDK_PORT_TYPE_ETH_25G; + case ETH_SPEED_NUM_40G: + return VNET_DPDK_PORT_TYPE_ETH_40G; + case ETH_SPEED_NUM_50G: + return VNET_DPDK_PORT_TYPE_ETH_50G; + case ETH_SPEED_NUM_56G: + return VNET_DPDK_PORT_TYPE_ETH_56G; + case ETH_SPEED_NUM_100G: + return VNET_DPDK_PORT_TYPE_ETH_100G; + default: + return VNET_DPDK_PORT_TYPE_UNKNOWN; + } +} static u32 dpdk_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi, u32 flags) { dpdk_main_t *dm = &dpdk_main; dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance); - u32 old = 0; + u32 old = (xd->flags & DPDK_DEVICE_FLAG_PROMISC) != 0; - if (ETHERNET_INTERFACE_FLAG_CONFIG_PROMISC (flags)) - { - old = (xd->flags & DPDK_DEVICE_FLAG_PROMISC) != 0; - - if (flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL) - xd->flags |= DPDK_DEVICE_FLAG_PROMISC; - else - xd->flags &= ~DPDK_DEVICE_FLAG_PROMISC; - - if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) - { - if (xd->flags & DPDK_DEVICE_FLAG_PROMISC) - rte_eth_promiscuous_enable (xd->port_id); - else - rte_eth_promiscuous_disable (xd->port_id); - } - } - else if (ETHERNET_INTERFACE_FLAG_CONFIG_MTU (flags)) + switch (flags) { + case ETHERNET_INTERFACE_FLAG_DEFAULT_L3: + /* set to L3/non-promisc mode */ + xd->flags &= ~DPDK_DEVICE_FLAG_PROMISC; + break; + case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL: + xd->flags |= DPDK_DEVICE_FLAG_PROMISC; + break; + case ETHERNET_INTERFACE_FLAG_MTU: xd->port_conf.rxmode.max_rx_pkt_len = hi->max_packet_bytes; dpdk_device_setup (xd); + return 0; + default: + return ~0; } + + if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) + { + if (xd->flags & DPDK_DEVICE_FLAG_PROMISC) + rte_eth_promiscuous_enable (xd->port_id); + else + rte_eth_promiscuous_disable (xd->port_id); + } + return old; } -static void -dpdk_device_lock_init (dpdk_device_t * 
xd) +static int +dpdk_port_crc_strip_enabled (dpdk_device_t * xd) { - int q; - vec_validate (xd->lockp, xd->tx_q_used - 1); - for (q = 0; q < xd->tx_q_used; q++) - { - xd->lockp[q] = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, - CLIB_CACHE_LINE_BYTES); - memset ((void *) xd->lockp[q], 0, CLIB_CACHE_LINE_BYTES); - } + return !(xd->port_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC); } -static struct rte_mempool_ops * -get_ops_by_name (char *ops_name) +/* The function check_l3cache helps check if Level 3 cache exists or not on current CPUs + return value 1: exist. + return value 0: not exist. +*/ +static int +check_l3cache () { - u32 i; - for (i = 0; i < rte_mempool_ops_table.num_ops; i++) + struct dirent *dp; + clib_error_t *err; + const char *sys_cache_dir = "/sys/devices/system/cpu/cpu0/cache"; + DIR *dir_cache = opendir (sys_cache_dir); + + if (dir_cache == NULL) + return -1; + + while ((dp = readdir (dir_cache)) != NULL) { - if (!strcmp (ops_name, rte_mempool_ops_table.ops[i].name)) - return &rte_mempool_ops_table.ops[i]; + if (dp->d_type == DT_DIR) + { + u8 *p = NULL; + int level_cache = -1; + + p = format (p, "%s/%s/%s%c", sys_cache_dir, dp->d_name, "level", 0); + if ((err = clib_sysfs_read ((char *) p, "%d", &level_cache))) + clib_error_free (err); + + if (level_cache == 3) + { + closedir (dir_cache); + return 1; + } + } } + if (dir_cache != NULL) + closedir (dir_cache); + return 0; } -static int -dpdk_ring_alloc (struct rte_mempool *mp) +static void +dpdk_enable_l4_csum_offload (dpdk_device_t * xd) { - u32 rg_flags = 0, count; - i32 ret; - char rg_name[RTE_RING_NAMESIZE]; - struct rte_ring *r; - - ret = snprintf (rg_name, sizeof (rg_name), RTE_MEMPOOL_MZ_FORMAT, mp->name); - if (ret < 0 || ret >= (i32) sizeof (rg_name)) - return -ENAMETOOLONG; - - /* ring flags */ - if (mp->flags & MEMPOOL_F_SP_PUT) - rg_flags |= RING_F_SP_ENQ; - if (mp->flags & MEMPOOL_F_SC_GET) - rg_flags |= RING_F_SC_DEQ; - - count = rte_align32pow2 (mp->size + 1); - /* - * Allocate the ring that will be used to store objects. - * Ring functions will return appropriate errors if we are - * running as a secondary process etc., so no checks made - * in this function for that condition. - */ - /* XXX can we get memory from the right socket? */ - r = clib_mem_alloc_aligned (rte_ring_get_memsize (count), - CLIB_CACHE_LINE_BYTES); - - /* XXX rte_ring_lookup will not work */ - - ret = rte_ring_init (r, rg_name, count, rg_flags); - if (ret) - return ret; - - mp->pool_data = r; - - return 0; + xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM; + xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM; + xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD | + DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM; } static clib_error_t * @@ -192,7 +206,6 @@ dpdk_lib_init (dpdk_main_t * dm) { u32 nports; u32 mtu, max_rx_frame; - u32 nb_desc = 0; int i; clib_error_t *error; vlib_main_t *vm = vlib_get_main (); @@ -203,39 +216,14 @@ dpdk_lib_init (dpdk_main_t * dm) dpdk_device_t *xd; vlib_pci_addr_t last_pci_addr; u32 last_pci_addr_port = 0; - vlib_thread_registration_t *tr_hqos; - uword *p_hqos; - - u32 next_hqos_cpu = 0; u8 af_packet_instance_num = 0; - u8 bond_ether_instance_num = 0; last_pci_addr.as_u32 = ~0; - dm->hqos_cpu_first_index = 0; - dm->hqos_cpu_count = 0; - - /* find out which cpus will be used for I/O TX */ - p_hqos = hash_get_mem (tm->thread_registrations_by_name, "hqos-threads"); - tr_hqos = p_hqos ? 
(vlib_thread_registration_t *) p_hqos[0] : 0; - - if (tr_hqos && tr_hqos->count > 0) - { - dm->hqos_cpu_first_index = tr_hqos->first_index; - dm->hqos_cpu_count = tr_hqos->count; - } - - vec_validate_aligned (dm->devices_by_hqos_cpu, tm->n_vlib_mains - 1, - CLIB_CACHE_LINE_BYTES); - -#if RTE_VERSION < RTE_VERSION_NUM(18, 5, 0, 0) - nports = rte_eth_dev_count (); -#else nports = rte_eth_dev_count_avail (); -#endif if (nports < 1) { - dpdk_log_notice ("DPDK drivers found no ports..."); + dpdk_log_notice ("DPDK drivers found no Ethernet devices..."); } if (CLIB_DEBUG > 0) @@ -250,11 +238,8 @@ dpdk_lib_init (dpdk_main_t * dm) CLIB_CACHE_LINE_BYTES); for (i = 0; i < tm->n_vlib_mains; i++) { - vlib_buffer_free_list_t *fl; dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data, i); - fl = vlib_buffer_get_free_list (vm, - VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); - vlib_buffer_init_for_free_list (&ptd->buffer_template, fl); + clib_memset (&ptd->buffer_template, 0, sizeof (vlib_buffer_t)); ptd->buffer_template.flags = dm->buffer_flags_template; vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_TX] = (u32) ~ 0; } @@ -263,45 +248,64 @@ dpdk_lib_init (dpdk_main_t * dm) RTE_ETH_FOREACH_DEV(i) { u8 addr[6]; - u8 vlan_strip = 0; + int vlan_off; struct rte_eth_dev_info dev_info; struct rte_pci_device *pci_dev; - struct rte_eth_link l; + struct rte_vmbus_device *vmbus_dev; + dpdk_portid_t next_port_id; dpdk_device_config_t *devconf = 0; vlib_pci_addr_t pci_addr; + vlib_vmbus_addr_t vmbus_addr; uword *p = 0; if (!rte_eth_dev_is_valid_port(i)) continue; - rte_eth_link_get_nowait (i, &l); rte_eth_dev_info_get (i, &dev_info); -#if RTE_VERSION < RTE_VERSION_NUM(18, 5, 0, 0) - pci_dev = dev_info.pci_dev; -#else if (dev_info.device == 0) { - clib_warning ("DPDK bug: missing device info. Skipping %s device", + dpdk_log_notice ("DPDK bug: missing device info. 
Skipping %s device", dev_info.driver_name); continue; } - pci_dev = RTE_DEV_TO_PCI (dev_info.device); -#endif - if (pci_dev) /* bonded interface has no pci info */ + pci_dev = dpdk_get_pci_device (&dev_info); + + if (pci_dev) { pci_addr.domain = pci_dev->addr.domain; pci_addr.bus = pci_dev->addr.bus; pci_addr.slot = pci_dev->addr.devid; pci_addr.function = pci_dev->addr.function; - p = - hash_get (dm->conf->device_config_index_by_pci_addr, - pci_addr.as_u32); + p = hash_get (dm->conf->device_config_index_by_pci_addr, + pci_addr.as_u32); + } + + vmbus_dev = dpdk_get_vmbus_device (&dev_info); + + if (vmbus_dev) + { + unformat_input_t input_vmbus; + + unformat_init_vector (&input_vmbus, (u8 *) dev_info.device->name); + if (unformat (&input_vmbus, "%U", unformat_vlib_vmbus_addr, + &vmbus_addr)) + { + p = mhash_get (&dm->conf->device_config_index_by_vmbus_addr, + &vmbus_addr); + } } if (p) - devconf = pool_elt_at_index (dm->conf->dev_confs, p[0]); + { + devconf = pool_elt_at_index (dm->conf->dev_confs, p[0]); + /* If device is blacklisted, we should skip it */ + if (devconf->is_blacklisted) + { + continue; + } + } else devconf = &dm->conf->default_devconf; @@ -310,19 +314,26 @@ dpdk_lib_init (dpdk_main_t * dm) xd->nb_rx_desc = DPDK_NB_RX_DESC_DEFAULT; xd->nb_tx_desc = DPDK_NB_TX_DESC_DEFAULT; xd->cpu_socket = (i8) rte_eth_dev_socket_id (i); + if (p) + { + xd->name = devconf->name; + } + /* Handle representor devices that share the same PCI ID */ + if (dev_info.switch_info.domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) + { + if (dev_info.switch_info.port_id != (uint16_t)-1) + xd->interface_name_suffix = format (0, "%d", dev_info.switch_info.port_id); + } /* Handle interface naming for devices with multiple ports sharing same PCI ID */ - if (pci_dev) + else if (pci_dev && + ((next_port_id = rte_eth_find_next (i + 1)) != RTE_MAX_ETHPORTS)) { struct rte_eth_dev_info di = { 0 }; struct rte_pci_device *next_pci_dev; - rte_eth_dev_info_get (i + 1, &di); -#if RTE_VERSION < RTE_VERSION_NUM(18, 5, 0, 0) - next_pci_dev = di.pci_dev; -#else + rte_eth_dev_info_get (next_port_id, &di); next_pci_dev = di.device ? 
RTE_DEV_TO_PCI (di.device) : 0; -#endif - if (pci_dev && next_pci_dev && + if (next_pci_dev && pci_addr.as_u32 != last_pci_addr.as_u32 && memcmp (&pci_dev->addr, &next_pci_dev->addr, sizeof (struct rte_pci_addr)) == 0) @@ -347,23 +358,34 @@ dpdk_lib_init (dpdk_main_t * dm) clib_memcpy (&xd->tx_conf, &dev_info.default_txconf, sizeof (struct rte_eth_txconf)); + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) + { + xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM; + xd->flags |= DPDK_DEVICE_FLAG_RX_IP4_CKSUM; + } + + if (dm->conf->enable_tcp_udp_checksum) + { + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) + xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_UDP_CKSUM; + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) + xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_CKSUM; + } + if (dm->conf->no_multi_seg) { - xd->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS; - port_conf_template.rxmode.jumbo_frame = 0; - port_conf_template.rxmode.enable_scatter = 0; + xd->port_conf.txmode.offloads &= ~DEV_TX_OFFLOAD_MULTI_SEGS; + xd->port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; + xd->port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_SCATTER; } else { - xd->tx_conf.txq_flags &= ~ETH_TXQ_FLAGS_NOMULTSEGS; - port_conf_template.rxmode.jumbo_frame = 1; - port_conf_template.rxmode.enable_scatter = 1; + xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; + xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER; xd->flags |= DPDK_DEVICE_FLAG_MAYBE_MULTISEG; } - clib_memcpy (&xd->port_conf, &port_conf_template, - sizeof (struct rte_eth_conf)); - xd->tx_q_used = clib_min (dev_info.max_tx_queues, tm->n_vlib_mains); if (devconf->num_tx_queues > 0 @@ -379,11 +401,24 @@ dpdk_lib_init (dpdk_main_t * dm) xd->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP; else - xd->port_conf.rx_adv_conf.rss_conf.rss_hf = devconf->rss_fn; + { + u64 unsupported_bits; + xd->port_conf.rx_adv_conf.rss_conf.rss_hf = devconf->rss_fn; + unsupported_bits = xd->port_conf.rx_adv_conf.rss_conf.rss_hf; + unsupported_bits &= ~dev_info.flow_type_rss_offloads; + if (unsupported_bits) + dpdk_log_warn ("Unsupported RSS hash functions: %U", + format_dpdk_rss_hf_name, unsupported_bits); + } + xd->port_conf.rx_adv_conf.rss_conf.rss_hf &= + dev_info.flow_type_rss_offloads; } else xd->rx_q_used = 1; + vec_validate_aligned (xd->rx_queues, xd->rx_q_used - 1, + CLIB_CACHE_LINE_BYTES); + xd->flags |= DPDK_DEVICE_FLAG_PMD; /* workaround for drivers not setting driver_name */ @@ -417,25 +452,32 @@ dpdk_lib_init (dpdk_main_t * dm) case VNET_DPDK_PMD_IGB: case VNET_DPDK_PMD_IXGBE: case VNET_DPDK_PMD_I40E: + case VNET_DPDK_PMD_ICE: xd->port_type = port_type_from_speed_capa (&dev_info); xd->supported_flow_actions = VNET_FLOW_ACTION_MARK | VNET_FLOW_ACTION_REDIRECT_TO_NODE | + VNET_FLOW_ACTION_REDIRECT_TO_QUEUE | VNET_FLOW_ACTION_BUFFER_ADVANCE | - VNET_FLOW_ACTION_COUNT | VNET_FLOW_ACTION_DROP; + VNET_FLOW_ACTION_COUNT | VNET_FLOW_ACTION_DROP | + VNET_FLOW_ACTION_RSS; if (dm->conf->no_tx_checksum_offload == 0) { - xd->tx_conf.txq_flags &= ~ETH_TXQ_FLAGS_NOXSUMS; + xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM; + xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM; xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD | DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM; } + xd->port_conf.intr_conf.rxq = 1; break; case VNET_DPDK_PMD_CXGBE: case VNET_DPDK_PMD_MLX4: case VNET_DPDK_PMD_MLX5: + case VNET_DPDK_PMD_QEDE: + case 
VNET_DPDK_PMD_BNXT: xd->port_type = port_type_from_speed_capa (&dev_info); break; @@ -444,17 +486,52 @@ dpdk_lib_init (dpdk_main_t * dm) case VNET_DPDK_PMD_IXGBEVF: case VNET_DPDK_PMD_I40EVF: xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF; - xd->port_conf.rxmode.hw_strip_crc = 1; + if (dm->conf->no_tx_checksum_offload == 0) + { + xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM; + xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM; + xd->flags |= + DPDK_DEVICE_FLAG_TX_OFFLOAD | + DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM; + } + /*xd->port_conf.intr_conf.rxq = 1;*/ + break; + + /* iAVF */ + case VNET_DPDK_PMD_IAVF: + xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF; + xd->supported_flow_actions = VNET_FLOW_ACTION_MARK | + VNET_FLOW_ACTION_REDIRECT_TO_NODE | + VNET_FLOW_ACTION_REDIRECT_TO_QUEUE | + VNET_FLOW_ACTION_BUFFER_ADVANCE | + VNET_FLOW_ACTION_COUNT | VNET_FLOW_ACTION_DROP; + + if (dm->conf->no_tx_checksum_offload == 0) + { + xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM; + xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM; + xd->flags |= + DPDK_DEVICE_FLAG_TX_OFFLOAD | + DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM; + } + xd->port_conf.intr_conf.rxq = 1; break; case VNET_DPDK_PMD_THUNDERX: xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF; - xd->port_conf.rxmode.hw_strip_crc = 1; + + if (dm->conf->no_tx_checksum_offload == 0) + { + xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM; + xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM; + xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD; + } break; case VNET_DPDK_PMD_ENA: xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF; - xd->port_conf.rxmode.enable_scatter = 0; + xd->port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_SCATTER; + xd->port_conf.intr_conf.rxq = 1; break; case VNET_DPDK_PMD_DPAA2: @@ -463,20 +540,23 @@ dpdk_lib_init (dpdk_main_t * dm) /* Cisco VIC */ case VNET_DPDK_PMD_ENIC: - if (l.link_speed == 40000) - xd->port_type = VNET_DPDK_PORT_TYPE_ETH_40G; - else - xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G; + { + struct rte_eth_link l; + rte_eth_link_get_nowait (i, &l); + xd->port_type = port_type_from_link_speed (l.link_speed); + if (dm->conf->enable_tcp_udp_checksum) + dpdk_enable_l4_csum_offload (xd); + } break; /* Intel Red Rock Canyon */ case VNET_DPDK_PMD_FM10K: xd->port_type = VNET_DPDK_PORT_TYPE_ETH_SWITCH; - xd->port_conf.rxmode.hw_strip_crc = 1; break; /* virtio */ case VNET_DPDK_PMD_VIRTIO: + xd->port_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; xd->port_type = VNET_DPDK_PORT_TYPE_ETH_1G; xd->nb_rx_desc = DPDK_NB_RX_DESC_VIRTIO; xd->nb_tx_desc = DPDK_NB_TX_DESC_VIRTIO; @@ -485,7 +565,7 @@ dpdk_lib_init (dpdk_main_t * dm) /* vmxnet3 */ case VNET_DPDK_PMD_VMXNET3: xd->port_type = VNET_DPDK_PORT_TYPE_ETH_1G; - xd->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS; + xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; break; case VNET_DPDK_PMD_AF_PACKET: @@ -493,11 +573,6 @@ dpdk_lib_init (dpdk_main_t * dm) xd->af_packet_instance_num = af_packet_instance_num++; break; - case VNET_DPDK_PMD_BOND: - xd->port_type = VNET_DPDK_PORT_TYPE_ETH_BOND; - xd->bond_instance_num = bond_ether_instance_num++; - break; - case VNET_DPDK_PMD_VIRTIO_USER: xd->port_type = VNET_DPDK_PORT_TYPE_VIRTIO_USER; break; @@ -515,16 +590,44 @@ dpdk_lib_init (dpdk_main_t * dm) xd->port_conf.intr_conf.lsc = 1; break; + case VNET_DPDK_PMD_NETVSC: + { + struct rte_eth_link l; + rte_eth_link_get_nowait (i, &l); + xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF; + } + break; + default: xd->port_type = VNET_DPDK_PORT_TYPE_UNKNOWN; } if (devconf->num_rx_desc) 
xd->nb_rx_desc = devconf->num_rx_desc; + else { + + /* If num_rx_desc is not specified by VPP user, the current CPU is working + with 2M page and has no L3 cache, default num_rx_desc is changed to 512 + from original 1024 to help reduce TLB misses. + */ + if ((clib_mem_get_default_hugepage_size () == 2 << 20) + && check_l3cache() == 0) + xd->nb_rx_desc = 512; + } if (devconf->num_tx_desc) xd->nb_tx_desc = devconf->num_tx_desc; - } + else { + + /* If num_tx_desc is not specified by VPP user, the current CPU is working + with 2M page and has no L3 cache, default num_tx_desc is changed to 512 + from original 1024 to help reduce TLB misses. + */ + if ((clib_mem_get_default_hugepage_size () == 2 << 20) + && check_l3cache() == 0) + xd->nb_tx_desc = 512; + } + } if (xd->pmd == VNET_DPDK_PMD_AF_PACKET) { @@ -537,10 +640,7 @@ dpdk_lib_init (dpdk_main_t * dm) addr[1] = 0xfe; } else - rte_eth_macaddr_get (i, (struct ether_addr *) addr); - - if (xd->tx_q_used < tm->n_vlib_mains) - dpdk_device_lock_init (xd); + rte_eth_macaddr_get (i, (void *) addr); xd->port_id = i; xd->device_index = xd - dm->devices; @@ -549,42 +649,6 @@ dpdk_lib_init (dpdk_main_t * dm) /* assign interface to input thread */ int q; - if (devconf->hqos_enabled) - { - xd->flags |= DPDK_DEVICE_FLAG_HQOS; - - int cpu; - if (devconf->hqos.hqos_thread_valid) - { - if (devconf->hqos.hqos_thread >= dm->hqos_cpu_count) - return clib_error_return (0, "invalid HQoS thread index"); - - cpu = dm->hqos_cpu_first_index + devconf->hqos.hqos_thread; - } - else - { - if (dm->hqos_cpu_count == 0) - return clib_error_return (0, "no HQoS threads available"); - - cpu = dm->hqos_cpu_first_index + next_hqos_cpu; - - next_hqos_cpu++; - if (next_hqos_cpu == dm->hqos_cpu_count) - next_hqos_cpu = 0; - - devconf->hqos.hqos_thread_valid = 1; - devconf->hqos.hqos_thread = cpu; - } - - dpdk_device_and_queue_t *dq; - vec_add2 (dm->devices_by_hqos_cpu[cpu], dq, 1); - dq->device = xd->device_index; - dq->queue_id = 0; - } - - /* count the number of descriptors used for this device */ - nb_desc += xd->nb_rx_desc + xd->nb_tx_desc * xd->tx_q_used; - error = ethernet_register_interface (dm->vnet_main, dpdk_device_class.index, xd->device_index, /* ethernet address */ addr, @@ -624,7 +688,7 @@ dpdk_lib_init (dpdk_main_t * dm) * MTU calculations. To interop with them increase mru but only * if the device's settings can support it. 
*/ - if (xd->port_conf.rxmode.hw_strip_crc && + if (dpdk_port_crc_strip_enabled (xd) && (dev_info.max_rx_pktlen >= (ETHERNET_MAX_PACKET_BYTES + sizeof (ethernet_header_t) + 4))) @@ -637,7 +701,7 @@ dpdk_lib_init (dpdk_main_t * dm) max_rx_frame = ETHERNET_MAX_PACKET_BYTES; mtu = ETHERNET_MAX_PACKET_BYTES - sizeof (ethernet_header_t); - if (xd->port_conf.rxmode.hw_strip_crc && + if (dpdk_port_crc_strip_enabled (xd) && (dev_info.max_rx_pktlen >= (ETHERNET_MAX_PACKET_BYTES + 4))) { max_rx_frame += 4; @@ -657,7 +721,7 @@ dpdk_lib_init (dpdk_main_t * dm) mtu = dev_mtu; max_rx_frame = mtu + sizeof (ethernet_header_t); - if (xd->port_conf.rxmode.hw_strip_crc) + if (dpdk_port_crc_strip_enabled (xd)) { max_rx_frame += 4; } @@ -669,25 +733,30 @@ dpdk_lib_init (dpdk_main_t * dm) sw = vnet_get_hw_sw_interface (dm->vnet_main, xd->hw_if_index); xd->sw_if_index = sw->sw_if_index; - vnet_hw_interface_set_input_node (dm->vnet_main, xd->hw_if_index, - dpdk_input_node.index); + vnet_hw_if_set_input_node (dm->vnet_main, xd->hw_if_index, + dpdk_input_node.index); if (devconf->workers) { int i; q = 0; - clib_bitmap_foreach (i, devconf->workers, ({ - vnet_hw_interface_assign_rx_thread (dm->vnet_main, xd->hw_if_index, q++, - vdm->first_worker_thread_index + i); - })); + clib_bitmap_foreach (i, devconf->workers) { + dpdk_rx_queue_t *rxq = vec_elt_at_index (xd->rx_queues, q); + rxq->queue_index = vnet_hw_if_register_rx_queue ( + dm->vnet_main, xd->hw_if_index, q++, + vdm->first_worker_thread_index + i); + } } else for (q = 0; q < xd->rx_q_used; q++) { - vnet_hw_interface_assign_rx_thread (dm->vnet_main, xd->hw_if_index, q, /* any */ - ~1); + dpdk_rx_queue_t *rxq = vec_elt_at_index (xd->rx_queues, q); + rxq->queue_index = vnet_hw_if_register_rx_queue ( + dm->vnet_main, xd->hw_if_index, q, VNET_HW_IF_RXQ_THREAD_ANY); } + vnet_hw_if_update_runtime_data (dm->vnet_main, xd->hw_if_index); + /*Get vnet hardware interface */ hi = vnet_get_hw_interface (dm->vnet_main, xd->hw_if_index); @@ -697,83 +766,124 @@ dpdk_lib_init (dpdk_main_t * dm) { hi->max_packet_bytes = mtu; hi->max_supported_packet_bytes = max_rx_frame; + hi->numa_node = xd->cpu_socket; + + /* Indicate ability to support L3 DMAC filtering and + * initialize interface to L3 non-promisc mode */ + hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_MAC_FILTER; + ethernet_set_flags (dm->vnet_main, xd->hw_if_index, + ETHERNET_INTERFACE_FLAG_DEFAULT_L3); } if (dm->conf->no_tx_checksum_offload == 0) - if (xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) + if (xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD && hi != NULL) hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD; + if (devconf->tso == DPDK_DEVICE_TSO_ON && hi != NULL) + { + /*tcp_udp checksum must be enabled*/ + if ((dm->conf->enable_tcp_udp_checksum) && + (hi->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD)) + { + hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO; + xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_UDP_TSO; + } + else + clib_warning ("%s: TCP/UDP checksum offload must be enabled", + hi->name); + } + dpdk_device_setup (xd); - if (vec_len (xd->errors)) - dpdk_log_err ("setup failed for device %U. 
Errors:\n %U", - format_dpdk_device_name, i, - format_dpdk_device_errors, xd); + /* rss queues should be configured after dpdk_device_setup() */ + if ((hi != NULL) && (devconf->rss_queues != NULL)) + { + if (vnet_hw_interface_set_rss_queues + (vnet_get_main (), hi, devconf->rss_queues)) + { + clib_warning ("%s: Failed to set rss queues", hi->name); + } + } - if (devconf->hqos_enabled) - { - clib_error_t *rv; - rv = dpdk_port_setup_hqos (xd, &devconf->hqos); - if (rv) - return rv; - } + if (vec_len (xd->errors)) + dpdk_log_err ("setup failed for device %U. Errors:\n %U", + format_dpdk_device_name, i, + format_dpdk_device_errors, xd); /* - * For cisco VIC vNIC, set default to VLAN strip enabled, unless - * specified otherwise in the startup config. - * For other NICs default to VLAN strip disabled, unless specified - * otherwis in the startup config. + * A note on Cisco VIC (PMD_ENIC) and VLAN: + * + * With Cisco VIC vNIC, every ingress packet is tagged. On a + * trunk vNIC (C series "standalone" server), packets on no VLAN + * are tagged with vlan 0. On an access vNIC (standalone or B + * series "blade" server), packets on the default/native VLAN + * are tagged with that vNIC's VLAN. VPP expects these packets + * to be untagged, and previously enabled VLAN strip on VIC by + * default. But it also broke vlan sub-interfaces. + * + * The VIC adapter has "untag default vlan" ingress VLAN rewrite + * mode, which removes tags from these packets. VPP now includes + * a local patch for the enic driver to use this untag mode, so + * enabling vlan stripping is no longer needed. In future, the + * driver + dpdk will have an API to set the mode after + * rte_eal_init. Then, this note and local patch will be + * removed. */ - if (xd->pmd == VNET_DPDK_PMD_ENIC) - { - if (devconf->vlan_strip_offload != DPDK_DEVICE_VLAN_STRIP_OFF) - vlan_strip = 1; /* remove vlan tag from VIC port by default */ - else - dpdk_log_warn ("VLAN strip disabled for interface\n"); - } - else if (devconf->vlan_strip_offload == DPDK_DEVICE_VLAN_STRIP_ON) - vlan_strip = 1; - - if (vlan_strip) - { - int vlan_off; - vlan_off = rte_eth_dev_get_vlan_offload (xd->port_id); - vlan_off |= ETH_VLAN_STRIP_OFFLOAD; - xd->port_conf.rxmode.hw_vlan_strip = vlan_off; - if (rte_eth_dev_set_vlan_offload (xd->port_id, vlan_off) == 0) - dpdk_log_info ("VLAN strip enabled for interface\n"); - else - dpdk_log_warn ("VLAN strip cannot be supported by interface\n"); - } - if (hi) - hi->max_packet_bytes = xd->port_conf.rxmode.max_rx_pkt_len - - sizeof (ethernet_header_t); - else - clib_warning ("hi NULL"); + /* + * VLAN stripping: default to VLAN strip disabled, unless specified + * otherwise in the startup config. + */ - if (dm->conf->no_multi_seg) - mtu = mtu > ETHER_MAX_LEN ? 
ETHER_MAX_LEN : mtu; + vlan_off = rte_eth_dev_get_vlan_offload (xd->port_id); + if (devconf->vlan_strip_offload == DPDK_DEVICE_VLAN_STRIP_ON) + { + vlan_off |= ETH_VLAN_STRIP_OFFLOAD; + if (rte_eth_dev_set_vlan_offload (xd->port_id, vlan_off) >= 0) + dpdk_log_info ("VLAN strip enabled for interface\n"); + else + dpdk_log_warn ("VLAN strip cannot be supported by interface\n"); + xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } + else + { + if (vlan_off & ETH_VLAN_STRIP_OFFLOAD) + { + vlan_off &= ~ETH_VLAN_STRIP_OFFLOAD; + if (rte_eth_dev_set_vlan_offload (xd->port_id, vlan_off) >= 0) + dpdk_log_warn ("set VLAN offload failed\n"); + } + xd->port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + } + + if (hi) + hi->max_packet_bytes = xd->port_conf.rxmode.max_rx_pkt_len + - sizeof (ethernet_header_t); + else + dpdk_log_warn ("hi NULL"); + + if (dm->conf->no_multi_seg) + mtu = mtu > ETHER_MAX_LEN ? ETHER_MAX_LEN : mtu; + + rte_eth_dev_set_mtu (xd->port_id, mtu); +} - rte_eth_dev_set_mtu (xd->port_id, mtu); - } /* *INDENT-ON* */ - if (nb_desc > dm->conf->num_mbufs) - dpdk_log_err ("%d mbufs allocated but total rx/tx ring size is %d\n", - dm->conf->num_mbufs, nb_desc); - return 0; } static void dpdk_bind_devices_to_uio (dpdk_config_main_t * conf) { + vlib_main_t *vm = vlib_get_main (); clib_error_t *error; u8 *pci_addr = 0; int num_whitelisted = vec_len (conf->dev_confs); vlib_pci_device_info_t *d = 0; vlib_pci_addr_t *addr = 0, *addrs; + int i; addrs = vlib_pci_get_all_dev_addrs (); /* *INDENT-OFF* */ @@ -787,10 +897,11 @@ dpdk_bind_devices_to_uio (dpdk_config_main_t * conf) vlib_pci_free_device_info (d); d = 0; } - d = vlib_pci_get_device_info (addr, &error); + d = vlib_pci_get_device_info (vm, addr, &error); if (error) { - clib_error_report (error); + vlib_log_warn (dpdk_main.log_default, "%U", format_clib_error, error); + clib_error_free (error); continue; } @@ -802,11 +913,42 @@ dpdk_bind_devices_to_uio (dpdk_config_main_t * conf) uword * p = hash_get (conf->device_config_index_by_pci_addr, addr->as_u32); if (!p) - continue; + { + skipped_pci: + continue; + } devconf = pool_elt_at_index (conf->dev_confs, p[0]); } + /* Enforce Device blacklist by vendor and device */ + for (i = 0; i < vec_len (conf->blacklist_by_pci_vendor_and_device); i++) + { + u16 vendor, device; + vendor = (u16)(conf->blacklist_by_pci_vendor_and_device[i] >> 16); + device = (u16)(conf->blacklist_by_pci_vendor_and_device[i] & 0xFFFF); + if (d->vendor_id == vendor && d->device_id == device) + { + /* + * Expected case: device isn't whitelisted, + * so blacklist it... + */ + if (devconf == 0) + { + /* Device is blacklisted */ + pool_get (conf->dev_confs, devconf); + hash_set (conf->device_config_index_by_pci_addr, addr->as_u32, + devconf - conf->dev_confs); + devconf->pci_addr.as_u32 = addr->as_u32; + devconf->dev_addr_type = VNET_DEV_ADDR_PCI; + devconf->is_blacklisted = 1; + goto skipped_pci; + } + else /* explicitly whitelisted, ignore the device blacklist */ + break; + } + } + /* virtio */ if (d->vendor_id == 0x1af4 && (d->device_id == VIRTIO_PCI_LEGACY_DEVICEID_NET || @@ -814,33 +956,71 @@ dpdk_bind_devices_to_uio (dpdk_config_main_t * conf) ; /* vmxnet3 */ else if (d->vendor_id == 0x15ad && d->device_id == 0x07b0) - ; + { + /* + * For vmxnet3 PCI, unless it is explicitly specified in the whitelist, + * the default is to put it in the blacklist. 
+ */ + if (devconf == 0) + { + pool_get (conf->dev_confs, devconf); + hash_set (conf->device_config_index_by_pci_addr, addr->as_u32, + devconf - conf->dev_confs); + devconf->pci_addr.as_u32 = addr->as_u32; + devconf->is_blacklisted = 1; + } + } /* all Intel network devices */ else if (d->vendor_id == 0x8086 && d->device_class == PCI_CLASS_NETWORK_ETHERNET) ; /* all Intel QAT devices VFs */ else if (d->vendor_id == 0x8086 && d->device_class == PCI_CLASS_PROCESSOR_CO && - (d->device_id == 0x0443 || d->device_id == 0x37c9 || d->device_id == 0x19e3)) + (d->device_id == 0x0443 || d->device_id == 0x18a1 || d->device_id == 0x19e3 || + d->device_id == 0x37c9 || d->device_id == 0x6f55)) ; /* Cisco VIC */ - else if (d->vendor_id == 0x1137 && d->device_id == 0x0043) + else if (d->vendor_id == 0x1137 && + (d->device_id == 0x0043 || d->device_id == 0x0071)) ; /* Chelsio T4/T5 */ else if (d->vendor_id == 0x1425 && (d->device_id & 0xe000) == 0x4000) ; - /* Amazen Elastic Network Adapter */ + /* Amazon Elastic Network Adapter */ else if (d->vendor_id == 0x1d0f && d->device_id >= 0xec20 && d->device_id <= 0xec21) ; /* Cavium Network Adapter */ else if (d->vendor_id == 0x177d && d->device_id == 0x9712) ; - /* Mellanox */ + /* Cavium FastlinQ QL41000 Series */ + else if (d->vendor_id == 0x1077 && d->device_id >= 0x8070 && d->device_id <= 0x8090) + ; + /* Mellanox CX3, CX3VF */ + else if (d->vendor_id == 0x15b3 && d->device_id >= 0x1003 && d->device_id <= 0x1004) + { + continue; + } + /* Mellanox CX4, CX4VF, CX4LX, CX4LXVF, CX5, CX5VF, CX5EX, CX5EXVF */ else if (d->vendor_id == 0x15b3 && d->device_id >= 0x1013 && d->device_id <= 0x101a) { continue; } - /* Cavium FastlinQ QL41000 Series */ - else if (d->vendor_id == 0x1077 && d->device_id >= 0x8070 && d->device_id <= 0x8090) + /* Mellanox CX6, CX6VF, CX6DX, CX6DXVF */ + else if (d->vendor_id == 0x15b3 && d->device_id >= 0x101b && d->device_id <= 0x101e) + { + continue; + } + /* Broadcom NetXtreme S, and E series only */ + else if (d->vendor_id == 0x14e4 && + ((d->device_id >= 0x16c0 && + d->device_id != 0x16c6 && d->device_id != 0x16c7 && + d->device_id != 0x16dd && d->device_id != 0x16f7 && + d->device_id != 0x16fd && d->device_id != 0x16fe && + d->device_id != 0x170d && d->device_id != 0x170c && + d->device_id != 0x170e && d->device_id != 0x1712 && + d->device_id != 0x1713) || + (d->device_id == 0x1604 || d->device_id == 0x1605 || + d->device_id == 0x1614 || d->device_id == 0x1606 || + d->device_id == 0x1609 || d->device_id == 0x1614))) ; else { @@ -850,7 +1030,7 @@ dpdk_bind_devices_to_uio (dpdk_config_main_t * conf) continue; } - error = vlib_pci_bind_to_uio (addr, (char *) conf->uio_driver_name); + error = vlib_pci_bind_to_uio (vm, addr, (char *) conf->uio_driver_name); if (error) { @@ -861,6 +1041,7 @@ dpdk_bind_devices_to_uio (dpdk_config_main_t * conf) devconf - conf->dev_confs); devconf->pci_addr.as_u32 = addr->as_u32; } + devconf->dev_addr_type = VNET_DEV_ADDR_PCI; devconf->is_blacklisted = 1; clib_error_report (error); } @@ -870,38 +1051,144 @@ dpdk_bind_devices_to_uio (dpdk_config_main_t * conf) vlib_pci_free_device_info (d); } +static void +dpdk_bind_vmbus_devices_to_uio (dpdk_config_main_t * conf) +{ + clib_error_t *error; + vlib_vmbus_addr_t *addrs, *addr = 0; + int num_whitelisted = vec_len (conf->dev_confs); + int i; + + addrs = vlib_vmbus_get_all_dev_addrs (); + + /* *INDENT-OFF* */ + vec_foreach (addr, addrs) + { + dpdk_device_config_t *devconf = 0; + if (num_whitelisted) + { + uword *p = + mhash_get 
(&conf->device_config_index_by_vmbus_addr, addr); + if (!p) + { + /* No devices blacklisted, but have whitelisted. blacklist all + * non-whitelisted */ + pool_get (conf->dev_confs, devconf); + mhash_set (&conf->device_config_index_by_vmbus_addr, addr, + devconf - conf->dev_confs, 0); + devconf->vmbus_addr = *addr; + devconf->dev_addr_type = VNET_DEV_ADDR_VMBUS; + devconf->is_blacklisted = 1; + skipped_vmbus: + continue; + } + + devconf = pool_elt_at_index (conf->dev_confs, p[0]); + } + + /* Enforce Device blacklist by vmbus_addr */ + for (i = 0; i < vec_len (conf->blacklist_by_vmbus_addr); i++) + { + vlib_vmbus_addr_t *a1 = &conf->blacklist_by_vmbus_addr[i]; + vlib_vmbus_addr_t *a2 = addr; + if (memcmp (a1, a2, sizeof (vlib_vmbus_addr_t)) == 0) + { + if (devconf == 0) + { + /* Device not whitelisted */ + pool_get (conf->dev_confs, devconf); + mhash_set (&conf->device_config_index_by_vmbus_addr, addr, + devconf - conf->dev_confs, 0); + devconf->vmbus_addr = *addr; + devconf->dev_addr_type = VNET_DEV_ADDR_VMBUS; + devconf->is_blacklisted = 1; + goto skipped_vmbus; + } + else + { + break; + } + } + } + + error = vlib_vmbus_bind_to_uio (addr); + if (error) + { + if (devconf == 0) + { + pool_get (conf->dev_confs, devconf); + mhash_set (&conf->device_config_index_by_vmbus_addr, addr, + devconf - conf->dev_confs, 0); + devconf->vmbus_addr = *addr; + } + devconf->dev_addr_type = VNET_DEV_ADDR_VMBUS; + devconf->is_blacklisted = 1; + clib_error_report (error); + } + } + /* *INDENT-ON* */ +} + static clib_error_t * -dpdk_device_config (dpdk_config_main_t * conf, vlib_pci_addr_t pci_addr, - unformat_input_t * input, u8 is_default) +dpdk_device_config (dpdk_config_main_t *conf, void *addr, + dpdk_device_addr_type_t addr_type, unformat_input_t *input, + u8 is_default) { clib_error_t *error = 0; uword *p; - dpdk_device_config_t *devconf; + dpdk_device_config_t *devconf = 0; unformat_input_t sub_input; if (is_default) { devconf = &conf->default_devconf; } - else + else if (addr_type == VNET_DEV_ADDR_PCI) { - p = hash_get (conf->device_config_index_by_pci_addr, pci_addr.as_u32); + p = hash_get (conf->device_config_index_by_pci_addr, + ((vlib_pci_addr_t *) (addr))->as_u32); if (!p) { pool_get (conf->dev_confs, devconf); - hash_set (conf->device_config_index_by_pci_addr, pci_addr.as_u32, + hash_set (conf->device_config_index_by_pci_addr, + ((vlib_pci_addr_t *) (addr))->as_u32, devconf - conf->dev_confs); } else return clib_error_return (0, "duplicate configuration for PCI address %U", - format_vlib_pci_addr, &pci_addr); + format_vlib_pci_addr, addr); } + else if (addr_type == VNET_DEV_ADDR_VMBUS) + { + p = mhash_get (&conf->device_config_index_by_vmbus_addr, + (vlib_vmbus_addr_t *) (addr)); - devconf->pci_addr.as_u32 = pci_addr.as_u32; - devconf->hqos_enabled = 0; - dpdk_device_config_hqos_default (&devconf->hqos); + if (!p) + { + pool_get (conf->dev_confs, devconf); + mhash_set (&conf->device_config_index_by_vmbus_addr, addr, + devconf - conf->dev_confs, 0); + } + else + return clib_error_return ( + 0, "duplicate configuration for VMBUS address %U", + format_vlib_vmbus_addr, addr); + } + + if (addr_type == VNET_DEV_ADDR_PCI) + { + devconf->pci_addr.as_u32 = ((vlib_pci_addr_t *) (addr))->as_u32; + devconf->tso = DPDK_DEVICE_TSO_DEFAULT; + devconf->dev_addr_type = VNET_DEV_ADDR_PCI; + } + else if (addr_type == VNET_DEV_ADDR_VMBUS) + { + devconf->vmbus_addr = *((vlib_vmbus_addr_t *) (addr)); + devconf->tso = DPDK_DEVICE_TSO_DEFAULT; + devconf->dev_addr_type = VNET_DEV_ADDR_VMBUS; + } if (!input) return 0; @@ 
-917,6 +1204,8 @@ dpdk_device_config (dpdk_config_main_t * conf, vlib_pci_addr_t pci_addr, ; else if (unformat (input, "num-tx-desc %u", &devconf->num_tx_desc)) ; + else if (unformat (input, "name %s", &devconf->name)) + ; else if (unformat (input, "workers %U", unformat_bitmap_list, &devconf->workers)) ; @@ -932,19 +1221,19 @@ dpdk_device_config (dpdk_config_main_t * conf, vlib_pci_addr_t pci_addr, devconf->vlan_strip_offload = DPDK_DEVICE_VLAN_STRIP_OFF; else if (unformat (input, "vlan-strip-offload on")) devconf->vlan_strip_offload = DPDK_DEVICE_VLAN_STRIP_ON; - else - if (unformat - (input, "hqos %U", unformat_vlib_cli_sub_input, &sub_input)) + else if (unformat (input, "tso on")) { - devconf->hqos_enabled = 1; - error = unformat_hqos (&sub_input, &devconf->hqos); - if (error) - break; + devconf->tso = DPDK_DEVICE_TSO_ON; } - else if (unformat (input, "hqos")) + else if (unformat (input, "tso off")) { - devconf->hqos_enabled = 1; + devconf->tso = DPDK_DEVICE_TSO_OFF; } + else if (unformat (input, "devargs %s", &devconf->devargs)) + ; + else if (unformat (input, "rss-queues %U", + unformat_bitmap_list, &devconf->rss_queues)) + ; else { error = clib_error_return (0, "unknown input `%U'", @@ -961,11 +1250,10 @@ dpdk_device_config (dpdk_config_main_t * conf, vlib_pci_addr_t pci_addr, else if (devconf->workers && clib_bitmap_count_set_bits (devconf->workers) != devconf->num_rx_queues) - error = - clib_error_return (0, - "%U: number of worker threadds must be " - "equal to number of rx queues", format_vlib_pci_addr, - &pci_addr); + error = clib_error_return (0, + "%U: number of worker threads must be " + "equal to number of rx queues", + format_vlib_pci_addr, addr); return error; } @@ -1005,39 +1293,41 @@ static clib_error_t * dpdk_config (vlib_main_t * vm, unformat_input_t * input) { clib_error_t *error = 0; - dpdk_main_t *dm = &dpdk_main; dpdk_config_main_t *conf = &dpdk_config_main; vlib_thread_main_t *tm = vlib_get_thread_main (); dpdk_device_config_t *devconf; - vlib_pci_addr_t pci_addr; + vlib_pci_addr_t pci_addr = { 0 }; + vlib_vmbus_addr_t vmbus_addr = { 0 }; unformat_input_t sub_input; - uword x; + uword default_hugepage_sz, x; u8 *s, *tmp = 0; - u8 *rte_cmd = 0, *ethname = 0; - u32 log_level; int ret, i; int num_whitelisted = 0; + int eal_no_hugetlb = 0; u8 no_pci = 0; - u8 no_huge = 0; - u8 huge_dir = 0; + u8 no_vmbus = 0; u8 file_prefix = 0; u8 *socket_mem = 0; u8 *huge_dir_path = 0; + u32 vendor, device, domain, bus, func; huge_dir_path = format (0, "%s/hugepages%c", vlib_unix_get_runtime_dir (), 0); conf->device_config_index_by_pci_addr = hash_create (0, sizeof (uword)); - log_level = RTE_LOG_NOTICE; + mhash_init (&conf->device_config_index_by_vmbus_addr, sizeof (uword), + sizeof (vlib_vmbus_addr_t)); while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { /* Prime the pump */ if (unformat (input, "no-hugetlb")) { - vec_add1 (conf->eal_init_args, (u8 *) "no-huge"); - no_huge = 1; + vec_add1 (conf->eal_init_args, (u8 *) "--no-huge"); + eal_no_hugetlb = 1; } + else if (unformat (input, "telemetry")) + conf->enable_telemetry = 1; else if (unformat (input, "enable-tcp-udp-checksum")) conf->enable_tcp_udp_checksum = 1; @@ -1048,9 +1338,6 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) else if (unformat (input, "decimal-interface-names")) conf->interface_name_format_decimal = 1; - else if (unformat (input, "log-level %U", unformat_dpdk_log_level, &x)) - log_level = x; - else if (unformat (input, "no-multi-seg")) conf->no_multi_seg = 1; @@ -1058,8 +1345,7 @@ 
dpdk_config (vlib_main_t * vm, unformat_input_t * input) &sub_input)) { error = - dpdk_device_config (conf, (vlib_pci_addr_t) (u32) ~ 1, &sub_input, - 1); + dpdk_device_config (conf, 0, VNET_DEV_ADDR_ANY, &sub_input, 1); if (error) return error; @@ -1069,7 +1355,8 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) (input, "dev %U %U", unformat_vlib_pci_addr, &pci_addr, unformat_vlib_cli_sub_input, &sub_input)) { - error = dpdk_device_config (conf, pci_addr, &sub_input, 0); + error = dpdk_device_config (conf, &pci_addr, VNET_DEV_ADDR_PCI, + &sub_input, 0); if (error) return error; @@ -1078,14 +1365,40 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) } else if (unformat (input, "dev %U", unformat_vlib_pci_addr, &pci_addr)) { - error = dpdk_device_config (conf, pci_addr, 0, 0); + error = + dpdk_device_config (conf, &pci_addr, VNET_DEV_ADDR_PCI, 0, 0); + + if (error) + return error; + + num_whitelisted++; + } + else if (unformat (input, "dev %U %U", unformat_vlib_vmbus_addr, + &vmbus_addr, unformat_vlib_cli_sub_input, &sub_input)) + { + error = dpdk_device_config (conf, &vmbus_addr, VNET_DEV_ADDR_VMBUS, + &sub_input, 0); if (error) return error; num_whitelisted++; } - else if (unformat (input, "num-mbufs %d", &conf->num_mbufs)) + else if (unformat (input, "dev %U", unformat_vlib_vmbus_addr, + &vmbus_addr)) + { + error = + dpdk_device_config (conf, &vmbus_addr, VNET_DEV_ADDR_VMBUS, 0, 0); + + if (error) + return error; + + num_whitelisted++; + } + else if (unformat (input, "num-mem-channels %d", &conf->nchannels)) + conf->nchannels_set_manually = 0; + else if (unformat (input, "num-crypto-mbufs %d", + &conf->num_crypto_mbufs)) ; else if (unformat (input, "uio-driver %s", &conf->uio_driver_name)) ; @@ -1097,8 +1410,38 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) tmp = format (0, "--no-pci%c", 0); vec_add1 (conf->eal_init_args, tmp); } - else if (unformat (input, "poll-sleep %d", &dm->poll_sleep_usec)) - ; + else if (unformat (input, "blacklist %U", unformat_vlib_vmbus_addr, + &vmbus_addr)) + { + vec_add1 (conf->blacklist_by_vmbus_addr, vmbus_addr); + } + else + if (unformat + (input, "blacklist %x:%x:%x.%x", &domain, &bus, &device, &func)) + { + tmp = format (0, "-b%c", 0); + vec_add1 (conf->eal_init_args, tmp); + tmp = + format (0, "%04x:%02x:%02x.%x%c", domain, bus, device, func, 0); + vec_add1 (conf->eal_init_args, tmp); + } + else if (unformat (input, "blacklist %x:%x", &vendor, &device)) + { + u32 blacklist_entry; + if (vendor > 0xFFFF) + return clib_error_return (0, "blacklist PCI vendor out of range"); + if (device > 0xFFFF) + return clib_error_return (0, "blacklist PCI device out of range"); + blacklist_entry = (vendor << 16) | (device & 0xffff); + vec_add1 (conf->blacklist_by_pci_vendor_and_device, + blacklist_entry); + } + else if (unformat (input, "no-vmbus")) + { + no_vmbus = 1; + tmp = format (0, "--no-vmbus%c", 0); + vec_add1 (conf->eal_init_args, tmp); + } #define _(a) \ else if (unformat(input, #a)) \ @@ -1111,9 +1454,7 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) #define _(a) \ else if (unformat(input, #a " %s", &s)) \ { \ - if (!strncmp(#a, "huge-dir", 8)) \ - huge_dir = 1; \ - else if (!strncmp(#a, "file-prefix", 11)) \ + if (!strncmp(#a, "file-prefix", 11)) \ file_prefix = 1; \ tmp = format (0, "--%s%c", #a, 0); \ vec_add1 (conf->eal_init_args, tmp); \ @@ -1162,109 +1503,42 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) if (!conf->uio_driver_name) conf->uio_driver_name = format (0, "auto%c", 0); - /* - * Use 1G 
huge pages if available. - */ - if (!no_huge && !huge_dir) + if (eal_no_hugetlb == 0) { - u32 x, *mem_by_socket = 0; - uword c = 0; - int rv; + vec_add1 (conf->eal_init_args, (u8 *) "--in-memory"); - umount ((char *) huge_dir_path); - - /* Process "socket-mem" parameter value */ - if (vec_len (socket_mem)) - { - unformat_input_t in; - unformat_init_vector (&in, socket_mem); - while (unformat_check_input (&in) != UNFORMAT_END_OF_INPUT) - { - if (unformat (&in, "%u,", &x)) - ; - else if (unformat (&in, "%u", &x)) - ; - else if (unformat (&in, ",")) - x = 0; - else - break; - - vec_add1 (mem_by_socket, x); - } - /* Note: unformat_free vec_frees(in.buffer), aka socket_mem... */ - unformat_free (&in); - socket_mem = 0; - } - else - { - /* *INDENT-OFF* */ - clib_bitmap_foreach (c, tm->cpu_socket_bitmap, ( - { - vec_validate(mem_by_socket, c); - mem_by_socket[c] = 64; /* default per-socket mem */ - } - )); - /* *INDENT-ON* */ - } + default_hugepage_sz = clib_mem_get_default_hugepage_size (); /* *INDENT-OFF* */ - clib_bitmap_foreach (c, tm->cpu_socket_bitmap, ( - { + clib_bitmap_foreach (x, tm->cpu_socket_bitmap) + { clib_error_t *e; + uword n_pages; + /* preallocate at least 16MB of hugepages per socket, + if more is needed it is up to consumer to preallocate more */ + n_pages = round_pow2 ((uword) 16 << 20, default_hugepage_sz); + n_pages /= default_hugepage_sz; - vec_validate(mem_by_socket, c); - - e = clib_sysfs_prealloc_hugepages(c, 2 << 10, mem_by_socket[c] / 2); - if (e) + if ((e = clib_sysfs_prealloc_hugepages(x, 0, n_pages))) clib_error_report (e); - })); + } /* *INDENT-ON* */ + } - if (mem_by_socket == 0) - { - error = clib_error_return (0, "mem_by_socket NULL"); - goto done; - } - _vec_len (mem_by_socket) = c + 1; - - /* regenerate socket_mem string */ - vec_foreach_index (x, mem_by_socket) - socket_mem = format (socket_mem, "%s%u", - socket_mem ? 
"," : "", mem_by_socket[x]); - socket_mem = format (socket_mem, "%c", 0); - - vec_free (mem_by_socket); - - error = vlib_unix_recursive_mkdir ((char *) huge_dir_path); - if (error) - { - goto done; - } - - rv = mount ("none", (char *) huge_dir_path, "hugetlbfs", 0, NULL); - - if (rv) - { - error = clib_error_return (0, "mount failed %d", errno); - goto done; - } + /* on/off dpdk's telemetry thread */ + if (conf->enable_telemetry == 0) + { + vec_add1 (conf->eal_init_args, (u8 *) "--no-telemetry"); + } - tmp = format (0, "--huge-dir%c", 0); + if (!file_prefix) + { + tmp = format (0, "--file-prefix%c", 0); vec_add1 (conf->eal_init_args, tmp); - tmp = format (0, "%s%c", huge_dir_path, 0); + tmp = format (0, "vpp%c", 0); vec_add1 (conf->eal_init_args, tmp); - if (!file_prefix) - { - tmp = format (0, "--file-prefix%c", 0); - vec_add1 (conf->eal_init_args, tmp); - tmp = format (0, "vpp%c", 0); - vec_add1 (conf->eal_init_args, tmp); - } } - vec_free (rte_cmd); - vec_free (ethname); - if (error) return error; @@ -1296,53 +1570,77 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) vec_insert (conf->eal_init_args, 2, 3); conf->eal_init_args[3] = (u8 *) "-n"; tmp = format (0, "%d", conf->nchannels); + vec_terminate_c_string (tmp); conf->eal_init_args[4] = tmp; } if (no_pci == 0 && geteuid () == 0) dpdk_bind_devices_to_uio (conf); + if (no_vmbus == 0 && geteuid () == 0) + dpdk_bind_vmbus_devices_to_uio (conf); + #define _(x) \ if (devconf->x == 0 && conf->default_devconf.x > 0) \ devconf->x = conf->default_devconf.x ; /* *INDENT-OFF* */ - pool_foreach (devconf, conf->dev_confs, ({ + pool_foreach (devconf, conf->dev_confs) { /* default per-device config items */ foreach_dpdk_device_config_item - /* add DPDK EAL whitelist/blacklist entry */ - if (num_whitelisted > 0 && devconf->is_blacklisted == 0) - { - tmp = format (0, "-w%c", 0); - vec_add1 (conf->eal_init_args, tmp); - tmp = format (0, "%U%c", format_vlib_pci_addr, &devconf->pci_addr, 0); - vec_add1 (conf->eal_init_args, tmp); - } - else if (num_whitelisted == 0 && devconf->is_blacklisted != 0) - { - tmp = format (0, "-b%c", 0); - vec_add1 (conf->eal_init_args, tmp); - tmp = format (0, "%U%c", format_vlib_pci_addr, &devconf->pci_addr, 0); - vec_add1 (conf->eal_init_args, tmp); - } - })); + /* copy vlan_strip config from default device */ + _ (vlan_strip_offload) + + /* copy tso config from default device */ + _ (tso) + + /* copy tso config from default device */ + _ (devargs) + + /* copy rss_queues config from default device */ + _ (rss_queues) + + /* add DPDK EAL whitelist/blacklist entry */ + if (num_whitelisted > 0 && devconf->is_blacklisted == 0 && + devconf->dev_addr_type == VNET_DEV_ADDR_PCI) + { + tmp = format (0, "-a%c", 0); + vec_add1 (conf->eal_init_args, tmp); + if (devconf->devargs) + { + tmp = format (0, "%U,%s%c", format_vlib_pci_addr, + &devconf->pci_addr, devconf->devargs, 0); + } + else + { + tmp = format (0, "%U%c", format_vlib_pci_addr, &devconf->pci_addr, 0); + } + vec_add1 (conf->eal_init_args, tmp); + } + else if (num_whitelisted == 0 && devconf->is_blacklisted != 0 && + devconf->dev_addr_type == VNET_DEV_ADDR_PCI) + { + tmp = format (0, "-b%c", 0); + vec_add1 (conf->eal_init_args, tmp); + tmp = format (0, "%U%c", format_vlib_pci_addr, &devconf->pci_addr, 0); + vec_add1 (conf->eal_init_args, tmp); + } + } /* *INDENT-ON* */ #undef _ /* set master-lcore */ - tmp = format (0, "--master-lcore%c", 0); + tmp = format (0, "--main-lcore%c", 0); vec_add1 (conf->eal_init_args, tmp); tmp = format (0, "%u%c", tm->main_lcore, 0); 
vec_add1 (conf->eal_init_args, tmp); - /* set socket-mem */ - tmp = format (0, "--socket-mem%c", 0); - vec_add1 (conf->eal_init_args, tmp); - tmp = format (0, "%s%c", socket_mem, 0); - vec_add1 (conf->eal_init_args, tmp); + + if (socket_mem) + clib_warning ("socket-mem argument is deprecated"); /* NULL terminate the "argv" vector, in case of stupidity */ vec_add1 (conf->eal_init_args, 0); @@ -1350,18 +1648,25 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) /* Set up DPDK eal and packet mbuf pool early. */ - rte_log_set_global_level (log_level); int log_fds[2] = { 0 }; if (pipe (log_fds) == 0) { - FILE *f = fdopen (log_fds[1], "a"); - if (f && rte_openlog_stream (f) == 0) + if (fcntl (log_fds[1], F_SETFL, O_NONBLOCK) == 0) { - clib_file_t t = { 0 }; - t.read_function = dpdk_log_read_ready; - t.file_descriptor = log_fds[0]; - t.description = format (0, "DPDK logging pipe"); - clib_file_add (&file_main, &t); + FILE *f = fdopen (log_fds[1], "a"); + if (f && rte_openlog_stream (f) == 0) + { + clib_file_t t = { 0 }; + t.read_function = dpdk_log_read_ready; + t.file_descriptor = log_fds[0]; + t.description = format (0, "DPDK logging pipe"); + clib_file_add (&file_main, &t); + } + } + else + { + close (log_fds[0]); + close (log_fds[1]); } } @@ -1372,10 +1677,16 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) conf->eal_init_args_str = format (conf->eal_init_args_str, "%s ", conf->eal_init_args[i]); - dpdk_log_warn ("EAL init args: %s", conf->eal_init_args_str); + vec_terminate_c_string (conf->eal_init_args_str); + + dpdk_log_notice ("EAL init args: %s", conf->eal_init_args_str); ret = rte_eal_init (vec_len (conf->eal_init_args), (char **) conf->eal_init_args); + /* enable the AVX-512 vPMDs in DPDK */ + if (clib_cpu_supports_avx512_bitalg ()) + rte_vect_set_max_simd_bitwidth (RTE_VECT_SIMD_512); + /* lazy umount hugepages */ umount2 ((char *) huge_dir_path, MNT_DETACH); rmdir ((char *) huge_dir_path); @@ -1384,36 +1695,10 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input) if (ret < 0) return clib_error_return (0, "rte_eal_init returned %d", ret); - /* set custom ring memory allocator */ - { - struct rte_mempool_ops *ops = NULL; - - ops = get_ops_by_name ("ring_sp_sc"); - ops->alloc = dpdk_ring_alloc; - - ops = get_ops_by_name ("ring_mp_sc"); - ops->alloc = dpdk_ring_alloc; - - ops = get_ops_by_name ("ring_sp_mc"); - ops->alloc = dpdk_ring_alloc; - - ops = get_ops_by_name ("ring_mp_mc"); - ops->alloc = dpdk_ring_alloc; - } - /* main thread 1st */ - error = dpdk_buffer_pool_create (vm, conf->num_mbufs, rte_socket_id ()); - if (error) + if ((error = dpdk_buffer_pools_create (vm))) return error; - for (i = 0; i < RTE_MAX_LCORE; i++) - { - error = dpdk_buffer_pool_create (vm, conf->num_mbufs, - rte_lcore_to_socket_id (i)); - if (error) - return error; - } - done: return error; } @@ -1433,7 +1718,7 @@ dpdk_update_link_state (dpdk_device_t * xd, f64 now) return; xd->time_last_link_update = now ? now : xd->time_last_link_update; - memset (&xd->link, 0, sizeof (xd->link)); + clib_memset (&xd->link, 0, sizeof (xd->link)); rte_eth_link_get_nowait (xd->port_id, &xd->link); if (LINK_STATE_ELOGS) @@ -1460,15 +1745,7 @@ dpdk_update_link_state (dpdk_device_t * xd, f64 now) ed->new_link_state = (u8) xd->link.link_status; } - if ((xd->flags & (DPDK_DEVICE_FLAG_ADMIN_UP | DPDK_DEVICE_FLAG_BOND_SLAVE)) - && ((xd->link.link_status != 0) ^ - vnet_hw_interface_is_link_up (vnm, xd->hw_if_index))) - { - hw_flags_chg = 1; - hw_flags |= (xd->link.link_status ? 
VNET_HW_INTERFACE_FLAG_LINK_UP : 0); - } - - if (hw_flags_chg || (xd->link.link_duplex != prev_link.link_duplex)) + if ((xd->link.link_duplex != prev_link.link_duplex)) { hw_flags_chg = 1; switch (xd->link.link_duplex) @@ -1483,54 +1760,18 @@ dpdk_update_link_state (dpdk_device_t * xd, f64 now) break; } } - if (hw_flags_chg || (xd->link.link_speed != prev_link.link_speed)) + if (xd->link.link_speed != prev_link.link_speed) + vnet_hw_interface_set_link_speed (vnm, xd->hw_if_index, + xd->link.link_speed * 1000); + + if (xd->link.link_status != prev_link.link_status) { hw_flags_chg = 1; - switch (xd->link.link_speed) - { - case ETH_SPEED_NUM_10M: - hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_10M; - break; - case ETH_SPEED_NUM_100M: - hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_100M; - break; - case ETH_SPEED_NUM_1G: - hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_1G; - break; - case ETH_SPEED_NUM_2_5G: - hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_2_5G; - break; - case ETH_SPEED_NUM_5G: - hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_5G; - break; - case ETH_SPEED_NUM_10G: - hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_10G; - break; - case ETH_SPEED_NUM_20G: - hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_20G; - break; - case ETH_SPEED_NUM_25G: - hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_25G; - break; - case ETH_SPEED_NUM_40G: - hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_40G; - break; - case ETH_SPEED_NUM_50G: - hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_50G; - break; - case ETH_SPEED_NUM_56G: - hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_56G; - break; - case ETH_SPEED_NUM_100G: - hw_flags |= VNET_HW_INTERFACE_FLAG_SPEED_100G; - break; - case 0: - break; - default: - dpdk_log_warn ("unknown link speed %d", xd->link.link_speed); - break; - } + + if (xd->link.link_status) + hw_flags |= VNET_HW_INTERFACE_FLAG_LINK_UP; } + if (hw_flags_chg) { if (LINK_STATE_ELOGS) @@ -1560,19 +1801,22 @@ static uword dpdk_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f) { clib_error_t *error; - vnet_main_t *vnm = vnet_get_main (); dpdk_main_t *dm = &dpdk_main; - ethernet_main_t *em = ðernet_main; dpdk_device_t *xd; vlib_thread_main_t *tm = vlib_get_thread_main (); - int i; - int j; error = dpdk_lib_init (dm); if (error) clib_error_report (error); + error = dpdk_cryptodev_init (vm); + if (error) + { + vlib_log_warn (dpdk_main.log_cryptodev, "%U", format_clib_error, error); + clib_error_free (error); + } + tm->worker_thread_release = 1; f64 now = vlib_time_now (vm); @@ -1581,116 +1825,6 @@ dpdk_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f) dpdk_update_link_state (xd, now); } - { - /* - * Extra set up for bond interfaces: - * 1. Setup MACs for bond interfaces and their slave links which was set - * in dpdk_device_setup() but needs to be done again here to take - * effect. - * 2. Set up info and register slave link state change callback handling. - * 3. Set up info for bond interface related CLI support. 
- */ -#if RTE_VERSION < RTE_VERSION_NUM(18, 5, 0, 0) - int nports = rte_eth_dev_count (); -#else - int nports = rte_eth_dev_count_avail (); -#endif - if (nports > 0) - { - /* *INDENT-OFF* */ - RTE_ETH_FOREACH_DEV(i) - { - xd = NULL; - for (j = 0; j < nports; j++) - { - if (dm->devices[j].port_id == i) - { - xd = &dm->devices[j]; - } - } - ASSERT (xd != NULL); - - if (xd->pmd == VNET_DPDK_PMD_BOND) - { - u8 addr[6]; - dpdk_portid_t slink[16]; - int nlink = rte_eth_bond_slaves_get (i, slink, 16); - if (nlink > 0) - { - vnet_hw_interface_t *bhi; - ethernet_interface_t *bei; - int rv; - - /* Get MAC of 1st slave link */ - rte_eth_macaddr_get - (slink[0], (struct ether_addr *) addr); - - /* Set MAC of bounded interface to that of 1st slave link */ - dpdk_log_info ("Set MAC for bond port %d BondEthernet%d", - i, xd->bond_instance_num); - rv = rte_eth_bond_mac_address_set - (i, (struct ether_addr *) addr); - if (rv) - dpdk_log_warn ("Set MAC addr failure rv=%d", rv); - - /* Populate MAC of bonded interface in VPP hw tables */ - bhi = vnet_get_hw_interface - (vnm, dm->devices[i].hw_if_index); - bei = pool_elt_at_index - (em->interfaces, bhi->hw_instance); - clib_memcpy (bhi->hw_address, addr, 6); - clib_memcpy (bei->address, addr, 6); - - /* Init l3 packet size allowed on bonded interface */ - bhi->max_packet_bytes = ETHERNET_MAX_PACKET_BYTES; - while (nlink >= 1) - { /* for all slave links */ - int slave = slink[--nlink]; - dpdk_device_t *sdev = &dm->devices[slave]; - vnet_hw_interface_t *shi; - vnet_sw_interface_t *ssi; - ethernet_interface_t *sei; - /* Add MAC to all slave links except the first one */ - if (nlink) - { - dpdk_log_info ("Add MAC for slave port %d", - slave); - rv = rte_eth_dev_mac_addr_add - (slave, (struct ether_addr *) addr, 0); - if (rv) - dpdk_log_warn ("Add MAC addr failure rv=%d", - rv); - } - /* Setup slave link state change callback handling */ - rte_eth_dev_callback_register - (slave, RTE_ETH_EVENT_INTR_LSC, - dpdk_port_state_callback, NULL); - dpdk_device_t *sxd = &dm->devices[slave]; - sxd->flags |= DPDK_DEVICE_FLAG_BOND_SLAVE; - sxd->bond_port = i; - /* Set slaves bitmap for bonded interface */ - bhi->bond_info = clib_bitmap_set - (bhi->bond_info, sdev->hw_if_index, 1); - /* Set MACs and slave link flags on slave interface */ - shi = vnet_get_hw_interface (vnm, sdev->hw_if_index); - ssi = vnet_get_sw_interface (vnm, sdev->sw_if_index); - sei = pool_elt_at_index - (em->interfaces, shi->hw_instance); - shi->bond_info = VNET_HW_INTERFACE_BOND_INFO_SLAVE; - ssi->flags |= VNET_SW_INTERFACE_FLAG_BOND_SLAVE; - clib_memcpy (shi->hw_address, addr, 6); - clib_memcpy (sei->address, addr, 6); - /* Set l3 packet size allowed as the lowest of slave */ - if (bhi->max_packet_bytes > shi->max_packet_bytes) - bhi->max_packet_bytes = shi->max_packet_bytes; - } - } - } - } - /* *INDENT-ON* */ - } - } - while (1) { /* @@ -1733,7 +1867,6 @@ dpdk_init (vlib_main_t * vm) { dpdk_main_t *dm = &dpdk_main; clib_error_t *error = 0; - vlib_thread_main_t *tm = vlib_get_thread_main (); /* verify that structs are cacheline aligned */ STATIC_ASSERT (offsetof (dpdk_device_t, cacheline0) == 0, @@ -1746,37 +1879,33 @@ dpdk_init (vlib_main_t * vm) STATIC_ASSERT (RTE_CACHE_LINE_SIZE == 1 << CLIB_LOG2_CACHE_LINE_BYTES, "DPDK RTE CACHE LINE SIZE does not match with 1<vlib_main = vm; dm->vnet_main = vnet_get_main (); dm->conf = &dpdk_config_main; dm->conf->nchannels = 4; - dm->conf->num_mbufs = dm->conf->num_mbufs ? 
dm->conf->num_mbufs : NB_MBUF; vec_add1 (dm->conf->eal_init_args, (u8 *) "vnet"); - vec_validate (dm->recycle, tm->n_thread_stacks - 1); - /* Default vlib_buffer_t flags, DISABLES tcp/udp checksumming... */ - dm->buffer_flags_template = - (VLIB_BUFFER_TOTAL_LENGTH_VALID | VLIB_BUFFER_EXT_HDR_VALID - | VNET_BUFFER_F_L4_CHECKSUM_COMPUTED | - VNET_BUFFER_F_L4_CHECKSUM_CORRECT | VNET_BUFFER_F_L2_HDR_OFFSET_VALID); + dm->buffer_flags_template = (VLIB_BUFFER_TOTAL_LENGTH_VALID | + VLIB_BUFFER_EXT_HDR_VALID | + VNET_BUFFER_F_L4_CHECKSUM_COMPUTED | + VNET_BUFFER_F_L4_CHECKSUM_CORRECT); dm->stat_poll_interval = DPDK_STATS_POLL_INTERVAL; dm->link_state_poll_interval = DPDK_LINK_POLL_INTERVAL; - /* init CLI */ - if ((error = vlib_call_init_function (vm, dpdk_cli_init))) - return error; - dm->log_default = vlib_log_register_class ("dpdk", 0); + dm->log_cryptodev = vlib_log_register_class ("dpdk", "cryptodev"); + dm->log_ipsec = vlib_log_register_class ("dpdk", "ipsec"); return error; } VLIB_INIT_FUNCTION (dpdk_init); - /* * fd.io coding-style-patch-verification: ON *
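
Note on the new configuration knobs parsed above: the patch adds per-device options (name, tso on/off, devargs, rss-queues) and global options (telemetry, num-crypto-mbufs, blacklist by PCI vendor:device or by VMBUS address) to the dpdk stanza of startup.conf. Below is a minimal sketch of how they might be combined; the PCI address, the 8086:10fb vendor:device pair, the interface name and the devargs string are illustrative placeholders, not values taken from this patch.

    dpdk {
      enable-tcp-udp-checksum      # required for "tso on" to take effect
      telemetry                    # keep DPDK's telemetry thread (default is --no-telemetry)
      blacklist 8086:10fb          # skip devices matching this PCI vendor:device id
      dev 0000:02:00.0 {
        name eth0                  # illustrative interface name
        num-rx-queues 2
        workers 1-2                # worker bitmap; set bits must equal num-rx-queues
        tso on
        rss-queues 0-1
        devargs key=value          # opaque string appended to the EAL -a entry for this device
      }
    }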