n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
}
}
+#if RTE_LIBRTE_KNI
else if (xd->dev_type == VNET_DPDK_DEV_KNI)
{
if (PREDICT_TRUE(tx_head > tx_tail))
n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
}
}
+#endif
else
{
ASSERT(0);
else
devname_format = "%s%x/%x/%x";
+#ifdef RTE_LIBRTE_KNI
if (dm->devices[i].dev_type == VNET_DPDK_DEV_KNI) {
return format(s, "kni%d", dm->devices[i].kni_port_id);
- } else if (dm->devices[i].dev_type == VNET_DPDK_DEV_VHOST_USER) {
+ } else
+#endif
+ if (dm->devices[i].dev_type == VNET_DPDK_DEV_VHOST_USER) {
return format(s, "VirtualEthernet0/0/%d", dm->devices[i].vu_if_id);
}
switch (dm->devices[i].port_type)
rte_eth_xstats_reset(xd->device_index);
}
+#ifdef RTE_LIBRTE_KNI
/*
 * rte_kni callback: invoked by DPDK when the kernel side of a KNI
 * interface requests an up/down state change (e.g. "ip link set ... up"
 * on the vEth device).  This stub accepts the request without applying
 * any state change on the user-space side and reports success.
 *
 * NOTE(review): this is a patch fragment — body lines may have been
 * elided between hunks; confirm against the full file that the stub is
 * intentionally empty.  Also confirm the deliberately unused
 * port_id/if_up parameters do not trigger -Wunused-parameter here.
 *
 * Returns 0 (success) unconditionally, as required for the request to
 * be acknowledged back to the kernel KNI module.
 */
static int
kni_config_network_if(u8 port_id, u8 if_up)
{
return 0;
}
+#endif
static clib_error_t *
dpdk_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
dpdk_device_t * xd = vec_elt_at_index (dm->devices, hif->dev_instance);
int rv = 0;
+#ifdef RTE_LIBRTE_KNI
if (xd->dev_type == VNET_DPDK_DEV_KNI)
{
if (is_up)
}
return 0;
}
+#endif
if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER)
{
if (is_up)
}
}
+#ifdef RTE_LIBRTE_KNI
else if (xd->dev_type == VNET_DPDK_DEV_KNI)
{
n_buffers = rte_kni_rx_burst(xd->kni, xd->rx_vectors[queue_id], VLIB_FRAME_SIZE);
rte_kni_handle_request(xd->kni);
}
+#endif
else
{
ASSERT(0);
rte_eth_dev_set_mtu(xd->device_index, hi->max_packet_bytes);
}
+#ifdef RTE_LIBRTE_KNI
if (dm->num_kni) {
clib_warning("Initializing KNI interfaces...");
rte_kni_init(dm->num_kni);
hi = vnet_get_hw_interface (dm->vnet_main, xd->vlib_hw_if_index);
}
}
+#endif
if (nb_desc > dm->num_mbufs)
clib_warning ("%d mbufs allocated but total rx/tx ring size is %d\n",