*/
if (PREDICT_FALSE(xd->lockp != 0))
{
- queue_id = 0;
- while (__sync_lock_test_and_set (xd->lockp, 1))
- /* zzzz */;
+ queue_id = queue_id % xd->tx_q_used;
+ while (__sync_lock_test_and_set (xd->lockp[queue_id], 1))
+ /* zzzz */
+ queue_id = (queue_id + 1) % xd->tx_q_used;
}
if (PREDICT_TRUE(xd->dev_type == VNET_DPDK_DEV_ETH))
}
if (PREDICT_FALSE(xd->lockp != 0))
- *xd->lockp = 0;
+ *xd->lockp[queue_id] = 0;
if (PREDICT_FALSE(rv < 0))
{
typedef struct {
CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
- volatile u32 *lockp;
+ volatile u32 **lockp;
/* Instance ID */
u32 device_index;
u32 nchannels;
u32 num_mbufs;
u32 use_rss;
+ u32 max_tx_queues;
u8 num_kni; /* while kni_init allows u32, port_id in callback fn is only u8 */
/* Ethernet input node index */
}
void dpdk_update_link_state (dpdk_device_t * xd, f64 now);
+void dpdk_device_lock_init(dpdk_device_t * xd);
+void dpdk_device_lock_free(dpdk_device_t * xd);
void dpdk_efd_update_counters(dpdk_device_t *xd, u32 n_buffers, u16 enabled);
u32 is_efd_discardable(vlib_thread_main_t *tm,
vlib_buffer_t * b0,
extern int rte_netmap_probe(void);
#endif
+/*
+ * dpdk_device_lock_init:
+ * Allocate one spinlock word per TX queue for device <xd>.
+ * Each lock gets its own cache-line-sized, cache-line-aligned
+ * allocation so that workers spinning on different queue locks do
+ * not false-share a cache line. The lock word starts at 0, which
+ * the acquire/release paths treat as "unlocked".
+ * NOTE(review): assumes xd->tx_q_used >= 1 (vec_validate is called
+ * with tx_q_used - 1) -- confirm at call sites.
+ */
+void
+dpdk_device_lock_init(dpdk_device_t * xd)
+{
+  int q;
+  /* Grow the per-queue lock-pointer vector to tx_q_used entries. */
+  vec_validate(xd->lockp, xd->tx_q_used - 1);
+  for (q = 0; q < xd->tx_q_used; q++)
+    {
+      xd->lockp[q] = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+                                             CLIB_CACHE_LINE_BYTES);
+      /* Zero the whole line: lock word 0 == unlocked. */
+      memset ((void *) xd->lockp[q], 0, CLIB_CACHE_LINE_BYTES);
+    }
+}
+
+/*
+ * dpdk_device_lock_free:
+ * Free every per-TX-queue lock allocated by dpdk_device_lock_init,
+ * then free the vector holding the lock pointers. Presumably safe
+ * to call when xd->lockp was never allocated (vec_len/vec_free on a
+ * null vector) -- confirm against the vec library's semantics.
+ */
+void
+dpdk_device_lock_free(dpdk_device_t * xd)
+{
+  int q;
+
+  for (q = 0; q < vec_len(xd->lockp); q++)
+    clib_mem_free((void *) xd->lockp[q]);
+  vec_free(xd->lockp);
+  /* Clear the pointer so later "lockp != 0" checks see no locks. */
+  xd->lockp = 0;
+}
+
static clib_error_t *
dpdk_lib_init (dpdk_main_t * dm)
{
memcpy(&xd->port_conf, &port_conf_template, sizeof(struct rte_eth_conf));
- xd->tx_q_used = dev_info.max_tx_queues < tm->n_vlib_mains ?
- 1 : tm->n_vlib_mains;
+ xd->tx_q_used = clib_min(dev_info.max_tx_queues, tm->n_vlib_mains);
+
+ if (dm->max_tx_queues)
+ xd->tx_q_used = clib_min(xd->tx_q_used, dm->max_tx_queues);
if (dm->use_rss > 1 && dev_info.max_rx_queues >= dm->use_rss)
{
rte_eth_macaddr_get(i,(struct ether_addr *)addr);
if (xd->tx_q_used < tm->n_vlib_mains)
- {
- xd->lockp = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
- CLIB_CACHE_LINE_BYTES);
- memset ((void *) xd->lockp, 0, CLIB_CACHE_LINE_BYTES);
- }
+ dpdk_device_lock_init(xd);
xd->device_index = xd - dm->devices;
ASSERT(i == xd->device_index);
else if (unformat (input, "num-mbufs %d", &dm->num_mbufs))
;
+ else if (unformat (input, "max-tx-queues %d", &dm->max_tx_queues))
+ ;
else if (unformat (input, "kni %d", &dm->num_kni))
;
else if (unformat (input, "uio-driver %s", &dm->uio_driver_name))
}
// reset lockp
- if (xd->lockp)
- memset ((void *) xd->lockp, 0, CLIB_CACHE_LINE_BYTES);
+ dpdk_device_lock_free(xd);
+
+ if (xd->tx_q_used < tm->n_vlib_mains)
+ dpdk_device_lock_init(xd);
// reset tx vectors
for (j = 0; j < tm->n_vlib_mains; j++)
xd->vu_vhost_dev.virtqueue[j]->backend = -1;
}
- xd->lockp = NULL;
- if (xd->tx_q_used < dm->input_cpu_count) {
- xd->lockp = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
- CLIB_CACHE_LINE_BYTES);
- memset ((void *) xd->lockp, 0, CLIB_CACHE_LINE_BYTES);
- }
+ if (xd->tx_q_used < dm->input_cpu_count)
+ dpdk_device_lock_init(xd);
DBG_SOCK("tm->n_vlib_mains: %d. TX %d, RX: %d, num_qpairs: %d, Lock: %p",
tm->n_vlib_mains, xd->tx_q_used, xd->rx_q_used, num_qpairs, xd->lockp);
.short_help = "show vhost-user interface",
.function = show_dpdk_vhost_user_command_fn,
};
-