#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>
-#include <vlib/unix/cj.h>
+#include <vppinfra/file.h>
+#include <vlib/unix/unix.h>
#include <assert.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/arp_packet.h>
+#include <vnet/interface/rx_queue_funcs.h>
+#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
-
#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>
void
dpdk_device_setup (dpdk_device_t * xd)
{
- dpdk_main_t *dm = &dpdk_main;
+ vlib_main_t *vm = vlib_get_main ();
vnet_main_t *vnm = vnet_get_main ();
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, xd->sw_if_index);
vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, xd->hw_if_index);
+ struct rte_eth_dev_info dev_info;
+ u64 bitmap;
int rv;
int j;
if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
{
- vnet_hw_interface_set_flags (dm->vnet_main, xd->hw_if_index, 0);
+ vnet_hw_interface_set_flags (vnm, xd->hw_if_index, 0);
dpdk_device_stop (xd);
}
xd->port_conf.fdir_conf.mode = RTE_FDIR_MODE_NONE;
}
+ rte_eth_dev_info_get (xd->port_id, &dev_info);
+
+ bitmap = xd->port_conf.txmode.offloads & ~dev_info.tx_offload_capa;
+ if (bitmap)
+ {
+ dpdk_log_warn ("unsupported tx offloads requested on port %u: %U",
+ xd->port_id, format_dpdk_tx_offload_caps, bitmap);
+ xd->port_conf.txmode.offloads ^= bitmap;
+ }
+
+ bitmap = xd->port_conf.rxmode.offloads & ~dev_info.rx_offload_capa;
+ if (bitmap)
+ {
+ dpdk_log_warn ("unsupported rx offloads requested on port %u: %U",
+ xd->port_id, format_dpdk_rx_offload_caps, bitmap);
+ xd->port_conf.rxmode.offloads ^= bitmap;
+ }
+
rv = rte_eth_dev_configure (xd->port_id, xd->rx_q_used,
xd->tx_q_used, &xd->port_conf);
goto error;
}
- /* Set up one TX-queue per worker thread */
+ vec_validate_aligned (xd->tx_queues, xd->tx_q_used - 1,
+ CLIB_CACHE_LINE_BYTES);
for (j = 0; j < xd->tx_q_used; j++)
{
rv =
&xd->tx_conf);
if (rv < 0)
dpdk_device_error (xd, "rte_eth_tx_queue_setup", rv);
+
+ if (xd->tx_q_used < tm->n_vlib_mains)
+ clib_spinlock_init (&vec_elt (xd->tx_queues, j).lock);
}
- vec_validate_aligned (xd->buffer_pool_for_queue, xd->rx_q_used - 1,
+ vec_validate_aligned (xd->rx_queues, xd->rx_q_used - 1,
CLIB_CACHE_LINE_BYTES);
for (j = 0; j < xd->rx_q_used; j++)
{
- dpdk_mempool_private_t *privp;
- uword tidx = vnet_get_device_input_thread_index (dm->vnet_main,
- xd->hw_if_index, j);
- unsigned lcore = vlib_worker_threads[tidx].lcore_id;
- u16 socket_id = rte_lcore_to_socket_id (lcore);
+ dpdk_rx_queue_t *rxq = vec_elt_at_index (xd->rx_queues, j);
+ u8 bpidx = vlib_buffer_pool_get_default_for_numa (
+ vm, vnet_hw_if_get_rx_queue_numa_node (vnm, rxq->queue_index));
+ vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, bpidx);
+ struct rte_mempool *mp = dpdk_mempool_by_buffer_pool_index[bpidx];
- rv =
- rte_eth_rx_queue_setup (xd->port_id, j, xd->nb_rx_desc,
- xd->cpu_socket, 0,
- dm->pktmbuf_pools[socket_id]);
+ rv = rte_eth_rx_queue_setup (xd->port_id, j, xd->nb_rx_desc,
+ xd->cpu_socket, 0, mp);
/* retry with any other CPU socket */
if (rv < 0)
- rv =
- rte_eth_rx_queue_setup (xd->port_id, j,
- xd->nb_rx_desc, SOCKET_ID_ANY, 0,
- dm->pktmbuf_pools[socket_id]);
+ rv = rte_eth_rx_queue_setup (xd->port_id, j, xd->nb_rx_desc,
+ SOCKET_ID_ANY, 0, mp);
- privp = rte_mempool_get_priv (dm->pktmbuf_pools[socket_id]);
- xd->buffer_pool_for_queue[j] = privp->buffer_pool_index;
+ rxq->buffer_pool_index = bp->index;
if (rv < 0)
dpdk_device_error (xd, "rte_eth_rx_queue_setup", rv);
goto error;
rte_eth_dev_set_mtu (xd->port_id, hi->max_packet_bytes);
+ xd->buffer_flags =
+ (VLIB_BUFFER_TOTAL_LENGTH_VALID | VLIB_BUFFER_EXT_HDR_VALID);
+ if (xd->port_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_CKSUM)
+ xd->buffer_flags |=
+ (VNET_BUFFER_F_L4_CHECKSUM_COMPUTED | VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
dpdk_device_start (xd);
sw->flags |= VNET_SW_INTERFACE_FLAG_ERROR;
}
+/* clib_file read callback attached to an rx queue's interrupt eventfd
+ * (registered in dpdk_setup_interrupts). Drains the eventfd, then —
+ * unless the queue has been switched back to polling mode — marks the
+ * queue interrupt-pending so the input node runs, and re-arms the
+ * device rx interrupt for that queue. Always returns 0 (no error). */
+static clib_error_t *
+dpdk_rx_read_ready (clib_file_t *uf)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ dpdk_main_t *dm = &dpdk_main;
+ /* private_data holds the vnet rx queue index (set at clib_file_add time) */
+ u32 qidx = uf->private_data;
+ vnet_hw_if_rx_queue_t *rxq = vnet_hw_if_get_rx_queue (vnm, qidx);
+ dpdk_device_t *xd = vec_elt_at_index (dm->devices, rxq->dev_instance);
+
+ u64 b;
+ /* drain the eventfd counter; the value read is intentionally unused */
+ CLIB_UNUSED (ssize_t size) = read (uf->file_descriptor, &b, sizeof (b));
+ if (rxq->mode != VNET_HW_IF_RX_MODE_POLLING)
+ {
+ vnet_hw_if_rx_queue_set_int_pending (vnm, uf->private_data);
+ /* re-enable the rx interrupt so the next packet raises a new event */
+ rte_eth_dev_rx_intr_enable (xd->port_id, rxq->queue_id);
+ }
+
+ return 0;
+}
+
+/* Probe and wire up rx interrupt support for a device.
+ *
+ * No-op unless the port was configured with rx-queue interrupts
+ * (port_conf.intr_conf.rxq). Probes by enabling the interrupt on
+ * queue 0; on success sets DPDK_DEVICE_FLAG_INT_SUPPORTED, advertises
+ * interrupt-mode capability on the hw interface, and registers one
+ * clib_file per rx queue over the queue's interrupt eventfd with
+ * dpdk_rx_read_ready as the read callback. If any queue fails to
+ * provide an eventfd, interrupt support is withdrawn for the whole
+ * device. Finally refreshes the interface runtime data. */
+static void
+dpdk_setup_interrupts (dpdk_device_t *xd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, xd->hw_if_index);
+ if (!hi)
+ return;
+
+ if (!xd->port_conf.intr_conf.rxq)
+ return;
+
+ /* Probe for interrupt support */
+ if (rte_eth_dev_rx_intr_enable (xd->port_id, 0))
+ {
+ dpdk_log_info ("probe for interrupt mode for device %U. Failed.\n",
+ format_dpdk_device_name, xd->port_id);
+ }
+ else
+ {
+ xd->flags |= DPDK_DEVICE_FLAG_INT_SUPPORTED;
+ /* undo the probe enable; queues start in polling mode */
+ if (!(xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE))
+ rte_eth_dev_rx_intr_disable (xd->port_id, 0);
+ dpdk_log_info ("Probe for interrupt mode for device %U. Success.\n",
+ format_dpdk_device_name, xd->port_id);
+ }
+
+ if (xd->flags & DPDK_DEVICE_FLAG_INT_SUPPORTED)
+ {
+ hi->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_INT_MODE;
+ for (int q = 0; q < xd->rx_q_used; q++)
+ {
+ dpdk_rx_queue_t *rxq = vec_elt_at_index (xd->rx_queues, q);
+ clib_file_t f = { 0 };
+ rxq->efd = rte_eth_dev_rx_intr_ctl_q_get_fd (xd->port_id, q);
+ if (rxq->efd < 0)
+ {
+ /* one queue without an eventfd disables int mode for the device */
+ xd->flags &= ~DPDK_DEVICE_FLAG_INT_SUPPORTED;
+ hi->caps &= ~VNET_HW_INTERFACE_CAP_SUPPORTS_INT_MODE;
+ break;
+ }
+ f.read_function = dpdk_rx_read_ready;
+ f.flags = UNIX_FILE_EVENT_EDGE_TRIGGERED;
+ f.file_descriptor = rxq->efd;
+ /* private_data = vnet rx queue index, consumed by dpdk_rx_read_ready */
+ f.private_data = rxq->queue_index;
+ f.description =
+ format (0, "%U queue %u", format_dpdk_device_name, xd->port_id, q);
+ rxq->clib_file_index = clib_file_add (&file_main, &f);
+ vnet_hw_if_set_rx_queue_file_index (vnm, rxq->queue_index,
+ rxq->clib_file_index);
+ if (xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE)
+ {
+ /* NOTE(review): for unmaskable-interrupt devices the file is
+ * immediately removed from the epoll set — presumably the fd
+ * is polled elsewhere since it cannot be masked; confirm. */
+ clib_file_main_t *fm = &file_main;
+ clib_file_t *f =
+ pool_elt_at_index (fm->file_pool, rxq->clib_file_index);
+ fm->file_update (f, UNIX_FILE_UPDATE_DELETE);
+ }
+ }
+ }
+ vnet_hw_if_update_runtime_data (vnm, xd->hw_if_index);
+}
+
void
dpdk_device_start (dpdk_device_t * xd)
{
return;
}
+ dpdk_setup_interrupts (xd);
+
if (xd->default_mac_address)
- rv =
- rte_eth_dev_default_mac_addr_set (xd->port_id,
- (struct ether_addr *)
- xd->default_mac_address);
+ rv = rte_eth_dev_default_mac_addr_set (xd->port_id,
+ (void *) xd->default_mac_address);
if (rv)
dpdk_device_error (xd, "rte_eth_dev_default_mac_addr_set", rv);
rte_eth_allmulticast_enable (xd->port_id);
- if (xd->pmd == VNET_DPDK_PMD_BOND)
- {
- dpdk_portid_t slink[16];
- int nlink = rte_eth_bond_slaves_get (xd->port_id, slink, 16);
- while (nlink >= 1)
- {
- dpdk_portid_t dpdk_port = slink[--nlink];
- rte_eth_allmulticast_enable (dpdk_port);
- }
- }
-
dpdk_log_info ("Interface %U started",
format_dpdk_device_name, xd->port_id);
}
rte_eth_allmulticast_disable (xd->port_id);
rte_eth_dev_stop (xd->port_id);
+ clib_memset (&xd->link, 0, sizeof (struct rte_eth_link));
- /* For bonded interface, stop slave links */
- if (xd->pmd == VNET_DPDK_PMD_BOND)
- {
- dpdk_portid_t slink[16];
- int nlink = rte_eth_bond_slaves_get (xd->port_id, slink, 16);
- while (nlink >= 1)
- {
- dpdk_portid_t dpdk_port = slink[--nlink];
- rte_eth_dev_stop (dpdk_port);
- }
- }
dpdk_log_info ("Interface %U stopped",
format_dpdk_device_name, xd->port_id);
}
-/* Even type for send_garp_na_process */
-enum
-{
- SEND_GARP_NA = 1,
-} dpdk_send_garp_na_process_event_t;
-
-static vlib_node_registration_t send_garp_na_proc_node;
-
-static uword
-send_garp_na_process (vlib_main_t * vm,
- vlib_node_runtime_t * rt, vlib_frame_t * f)
-{
- uword event_type, *event_data = 0;
-
- while (1)
- {
- u32 i;
- uword dpdk_port;
- vlib_process_wait_for_event (vm);
- event_type = vlib_process_get_events (vm, &event_data);
- ASSERT (event_type == SEND_GARP_NA);
- for (i = 0; i < vec_len (event_data); i++)
- {
- dpdk_port = event_data[i];
- if (i < 5) /* wait 0.2 sec for link to settle, max total 1 sec */
- vlib_process_suspend (vm, 0.2);
- dpdk_device_t *xd = &dpdk_main.devices[dpdk_port];
- dpdk_update_link_state (xd, vlib_time_now (vm));
- send_ip4_garp (vm, xd->sw_if_index);
- send_ip6_na (vm, xd->sw_if_index);
- }
- vec_reset_length (event_data);
- }
- return 0;
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (send_garp_na_proc_node, static) = {
- .function = send_garp_na_process,
- .type = VLIB_NODE_TYPE_PROCESS,
- .name = "send-garp-na-process",
-};
-/* *INDENT-ON* */
-
void vl_api_force_rpc_call_main_thread (void *fp, u8 * data, u32 data_length);
-static void
-garp_na_proc_callback (uword * dpdk_port)
-{
- vlib_main_t *vm = vlib_get_main ();
- ASSERT (vlib_get_thread_index () == 0);
- vlib_process_signal_event
- (vm, send_garp_na_proc_node.index, SEND_GARP_NA, *dpdk_port);
-}
-
always_inline int
dpdk_port_state_callback_inline (dpdk_portid_t port_id,
enum rte_eth_event_type type, void *param)
{
struct rte_eth_link link;
- dpdk_device_t *xd = &dpdk_main.devices[port_id];
RTE_SET_USED (param);
if (type != RTE_ETH_EVENT_INTR_LSC)
rte_eth_link_get_nowait (port_id, &link);
u8 link_up = link.link_status;
-
- if (xd->flags & DPDK_DEVICE_FLAG_BOND_SLAVE)
- {
- uword bd_port = xd->bond_port;
- int bd_mode = rte_eth_bond_mode_get (bd_port);
- dpdk_log_info ("Port %d state to %s, "
- "slave of port %d BondEthernet%d in mode %d",
- port_id, (link_up) ? "UP" : "DOWN",
- bd_port, xd->bond_instance_num, bd_mode);
- if (bd_mode == BONDING_MODE_ACTIVE_BACKUP)
- {
- vl_api_force_rpc_call_main_thread
- (garp_na_proc_callback, (u8 *) & bd_port, sizeof (uword));
- }
-
- if (link_up)
- xd->flags |= DPDK_DEVICE_FLAG_BOND_SLAVE_UP;
- else
- xd->flags &= ~DPDK_DEVICE_FLAG_BOND_SLAVE_UP;
- }
- else /* Should not happen as callback not setup for "normal" links */
- {
- if (link_up)
- dpdk_log_info ("Port %d Link Up - speed %u Mbps - %s",
- port_id, (unsigned) link.link_speed,
- (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
- "full-duplex" : "half-duplex");
- else
- dpdk_log_info ("Port %d Link Down\n\n", port_id);
- }
+ if (link_up)
+ dpdk_log_info ("Port %d Link Up - speed %u Mbps - %s",
+ port_id, (unsigned) link.link_speed,
+ (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ "full-duplex" : "half-duplex");
+ else
+ dpdk_log_info ("Port %d Link Down\n\n", port_id);
return 0;
}
return dpdk_port_state_callback_inline (port_id, type, param);
}
+/* If this device is PCI return pointer to info, otherwise NULL.
+ * Identifies the bus by name ("pci") via rte_bus_find_by_device and
+ * downcasts the generic rte_device with RTE_DEV_TO_PCI. */
+struct rte_pci_device *
+dpdk_get_pci_device (const struct rte_eth_dev_info *info)
+{
+ const struct rte_bus *bus;
+
+ bus = rte_bus_find_by_device (info->device);
+ if (bus && !strcmp (bus->name, "pci"))
+ return RTE_DEV_TO_PCI (info->device);
+ else
+ return NULL;
+}
+
+/* If this device is VMBUS return pointer to info, otherwise NULL.
+ * Identifies the bus by name ("vmbus") via rte_bus_find_by_device and
+ * recovers the enclosing rte_vmbus_device with container_of (DPDK has
+ * no RTE_DEV_TO_VMBUS equivalent of RTE_DEV_TO_PCI). */
+struct rte_vmbus_device *
+dpdk_get_vmbus_device (const struct rte_eth_dev_info *info)
+{
+ const struct rte_bus *bus;
+
+ bus = rte_bus_find_by_device (info->device);
+ if (bus && !strcmp (bus->name, "vmbus"))
+ return container_of (info->device, struct rte_vmbus_device, device);
+ else
+ return NULL;
+}
+
/*
* fd.io coding-style-patch-verification: ON
*