n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
}
}
+#if DPDK_VHOST_USER
else if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER)
{
u32 offset = 0;
queue_id = 0;
while (__sync_lock_test_and_set (xd->lockp[queue_id], 1));
}
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
else {
dpdk_device_and_queue_t * dq;
vec_foreach (dq, dm->devices_by_cpu[vm->cpu_index])
assert (dq);
offset = dq->queue_id * VIRTIO_QNUM;
}
-#endif
if (PREDICT_TRUE(tx_head > tx_tail))
{
int i; u32 bytes = 0;
if (xd->need_txlock)
*xd->lockp[queue_id] = 0;
}
+#endif
#if RTE_LIBRTE_KNI
else if (xd->dev_type == VNET_DPDK_DEV_KNI)
{
/*
 * Renumber a DPDK device: record the new device instance on the
 * underlying vhost-user device state.
 *
 * @param hi                hardware interface being renumbered
 * @param new_dev_instance  new instance number to assign
 * @return 0 always (renumbering is a no-op for non-vhost builds/devices)
 *
 * Fixes extraction damage in the original text: a stray '}' closed the
 * function before the vu_if_id assignment, and the DPDK_VHOST_USER
 * conditional lines carried diff '+' markers instead of being real
 * preprocessor directives.
 */
static int dpdk_device_renumber (vnet_hw_interface_t * hi,
                                 u32 new_dev_instance)
{
#if DPDK_VHOST_USER
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  /* Only vhost-user devices carry a vu_if_id; skip anything else rather
     than writing through an unrelated device.  NOTE(review): guard
     reconstructed from the surrounding file's conventions — confirm
     against the upstream function body. */
  if (!xd || xd->dev_type != VNET_DPDK_DEV_VHOST_USER || xd->vu_intf == 0)
    return 0;

  xd->vu_if_id = new_dev_instance;
#endif
  return 0;
}
memset (&xd->last_stats, 0, sizeof (xd->last_stats));
}
+#if DPDK_VHOST_USER
if (PREDICT_FALSE(xd->dev_type == VNET_DPDK_DEV_VHOST_USER)) {
int i;
for (i = 0; i < xd->rx_q_used * VIRTIO_QNUM; i++) {
xd->vu_intf->vrings[i].bytes = 0;
}
}
+#endif
}
#ifdef RTE_LIBRTE_KNI
else
{
xd->admin_up = 0;
- rte_kni_release(xd->kni);
+ int kni_rv;
+
+ kni_rv = rte_kni_release(xd->kni);
+ if (kni_rv < 0)
+ clib_warning ("rte_kni_release returned %d", kni_rv);
}
return 0;
}
#endif
+#if DPDK_VHOST_USER
if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER)
{
if (is_up)
return 0;
}
+#endif
if (is_up)
*/
if (xd->pmd != VNET_DPDK_PMD_VMXNET3)
rte_eth_dev_stop (xd->device_index);
+
+ /* For bonded interface, stop slave links */
+ if (xd->pmd == VNET_DPDK_PMD_BOND)
+ {
+ u8 slink[16];
+ int nlink = rte_eth_bond_slaves_get(xd->device_index, slink, 16);
+ while (nlink >=1)
+ {
+ u8 dpdk_port = slink[--nlink];
+ rte_eth_dev_stop (dpdk_port);
+ }
+ }
}
if (rv < 0)