#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
-#include <vnet/ip/ip.h>
-
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>
}
vui->nregions = 0;
- for (q = 0; q < VHOST_VRING_MAX_N; q++)
+ for (q = 0; q < vui->num_qid; q++)
{
vq = &vui->vrings[q];
vq->avail = 0;
vui->use_tx_spinlock = 0;
while (1)
{
- for (qid = 0; qid < VHOST_VRING_MAX_N / 2; qid++)
+ for (qid = 0; qid < vui->num_qid / 2; qid++)
{
vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
if (!rxvq->started || !rxvq->enabled)
vnet_hw_interface_set_input_node (vnm, vui->hw_if_index,
vhost_user_input_node.index);
vnet_hw_interface_assign_rx_thread (vnm, vui->hw_if_index, q, ~0);
- if (txvq->mode == VNET_HW_INTERFACE_RX_MODE_UNKNOWN)
+ if (txvq->mode == VNET_HW_IF_RX_MODE_UNKNOWN)
/* Set polling as the default */
- txvq->mode = VNET_HW_INTERFACE_RX_MODE_POLLING;
+ txvq->mode = VNET_HW_IF_RX_MODE_POLLING;
txvq->qid = q;
rv = vnet_hw_interface_set_rx_mode (vnm, vui->hw_if_index, q, txvq->mode);
if (rv)
{
int i, found[2] = { }; //RX + TX
- for (i = 0; i < VHOST_VRING_MAX_N; i++)
+ for (i = 0; i < vui->num_qid; i++)
if (vui->vrings[i].started && vui->vrings[i].enabled)
found[i & 1] = 1;
u32 qid = uf->private_data & 0xff;
n = read (uf->file_descriptor, ((char *) &buff), 8);
- vu_log_debug (vui, "if %d KICK queue %d", uf->private_data >> 8, qid);
+ vu_log_debug (vui, "if %d KICK queue %d", vui->hw_if_index, qid);
if (!vui->vrings[qid].started ||
(vhost_user_intf_ready (vui) != vui->is_ready))
{
vhost_user_vring_init (vhost_user_intf_t * vui, u32 qid)
{
vhost_user_vring_t *vring = &vui->vrings[qid];
+
clib_memset (vring, 0, sizeof (*vring));
vring->kickfd_idx = ~0;
vring->callfd_idx = ~0;
vring->errfd = -1;
vring->qid = -1;
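+  /* per-vring lock, moved into the vring itself (was vui->vring_locks[q]) */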
+ clib_spinlock_init (&vring->vring_lock);
+
/*
* Some qemu 2.5 builds have a bug, and this may be a fix.
* Feels like interpreting holy text, but this is from vhost-user.txt.
vring->errfd = -1;
}
+ clib_spinlock_free (&vring->vring_lock);
+
// Save the qid so that we don't need to unassign and re-assign the
// rx thread when the interface comes back up; those are expensive calls.
u16 q = vui->vrings[qid].qid;
vui->is_ready = 0;
- for (q = 0; q < VHOST_VRING_MAX_N; q++)
+ for (q = 0; q < vui->num_qid; q++)
vhost_user_vring_close (vui, q);
unmap_all_mem_regions (vui);
{
case VHOST_USER_GET_FEATURES:
msg.flags |= 4;
- msg.u64 = (1ULL << FEAT_VIRTIO_NET_F_MRG_RXBUF) |
- (1ULL << FEAT_VIRTIO_NET_F_CTRL_VQ) |
- (1ULL << FEAT_VIRTIO_F_ANY_LAYOUT) |
- (1ULL << FEAT_VIRTIO_F_INDIRECT_DESC) |
- (1ULL << FEAT_VHOST_F_LOG_ALL) |
- (1ULL << FEAT_VIRTIO_NET_F_GUEST_ANNOUNCE) |
- (1ULL << FEAT_VIRTIO_NET_F_MQ) |
- (1ULL << FEAT_VHOST_USER_F_PROTOCOL_FEATURES) |
- (1ULL << FEAT_VIRTIO_F_VERSION_1);
+ msg.u64 = VIRTIO_FEATURE (VIRTIO_NET_F_MRG_RXBUF) |
+ VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_VQ) |
+ VIRTIO_FEATURE (VIRTIO_F_ANY_LAYOUT) |
+ VIRTIO_FEATURE (VIRTIO_RING_F_INDIRECT_DESC) |
+ VIRTIO_FEATURE (VHOST_F_LOG_ALL) |
+ VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_ANNOUNCE) |
+ VIRTIO_FEATURE (VIRTIO_NET_F_MQ) |
+ VIRTIO_FEATURE (VHOST_USER_F_PROTOCOL_FEATURES) |
+ VIRTIO_FEATURE (VIRTIO_F_VERSION_1);
msg.u64 &= vui->feature_mask;
+
+ if (vui->enable_gso)
+ msg.u64 |= FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS;
+ if (vui->enable_packed)
+ msg.u64 |= VIRTIO_FEATURE (VIRTIO_F_RING_PACKED);
+
msg.size = sizeof (msg.u64);
vu_log_debug (vui, "if %d msg VHOST_USER_GET_FEATURES - reply "
"0x%016llx", vui->hw_if_index, msg.u64);
vui->features = msg.u64;
if (vui->features &
- ((1 << FEAT_VIRTIO_NET_F_MRG_RXBUF) |
- (1ULL << FEAT_VIRTIO_F_VERSION_1)))
+ (VIRTIO_FEATURE (VIRTIO_NET_F_MRG_RXBUF) |
+ VIRTIO_FEATURE (VIRTIO_F_VERSION_1)))
vui->virtio_net_hdr_sz = 12;
else
vui->virtio_net_hdr_sz = 10;
vui->is_any_layout =
- (vui->features & (1 << FEAT_VIRTIO_F_ANY_LAYOUT)) ? 1 : 0;
+ (vui->features & VIRTIO_FEATURE (VIRTIO_F_ANY_LAYOUT)) ? 1 : 0;
ASSERT (vui->virtio_net_hdr_sz < VLIB_BUFFER_PRE_DATA_SIZE);
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vui->hw_if_index);
+ if (vui->enable_gso &&
+ ((vui->features & FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS)
+ == FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS))
+ hw->flags |=
+ (VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO |
+ VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD);
+ else
+ hw->flags &= ~(VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO |
+ VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD);
vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
vui->is_ready = 0;
vhost_user_update_iface_state (vui);
vui->nregions++;
}
+
+ /*
+ * Re-map the desc, used, and avail descriptor tables if the vring
+ * addresses are set.
+ */
+ for (q = 0; q < vui->num_qid; q++)
+ {
+ if (vui->vrings[q].desc_user_addr &&
+ vui->vrings[q].used_user_addr && vui->vrings[q].avail_user_addr)
+ {
+ vui->vrings[q].desc =
+ map_user_mem (vui, vui->vrings[q].desc_user_addr);
+ vui->vrings[q].used =
+ map_user_mem (vui, vui->vrings[q].used_user_addr);
+ vui->vrings[q].avail =
+ map_user_mem (vui, vui->vrings[q].avail_user_addr);
+ }
+ }
vlib_worker_thread_barrier_release (vm);
break;
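
/*
 * For context, map_user_mem() (not part of this diff) is assumed to do a
 * linear scan of the mapped guest regions; a minimal sketch:
 *
 *   static_always_inline void *
 *   map_user_mem (vhost_user_intf_t * vui, uword addr)
 *   {
 *     int i;
 *     for (i = 0; i < vui->nregions; i++)
 *       if ((vui->regions[i].userspace_addr <= addr) &&
 *           ((vui->regions[i].userspace_addr +
 *             vui->regions[i].memory_size) > addr))
 *         return (void *) (vui->region_mmap_addr[i] + addr -
 *                          vui->regions[i].userspace_addr);
 *     return NULL;
 *   }
 */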
if ((msg.state.num > 32768) || /* maximum ring size is 32768 */
(msg.state.num == 0) || /* it cannot be zero */
- ((msg.state.num - 1) & msg.state.num)) /* must be power of 2 */
- goto close_socket;
+ ((msg.state.num - 1) & msg.state.num) || /* must be power of 2 */
+ (msg.state.index >= vui->num_qid))
+ {
+ vu_log_debug (vui, "invalid VHOST_USER_SET_VRING_NUM: msg.state.num"
+ " %d, msg.state.index %d, curruent max q %d",
+ msg.state.num, msg.state.index, vui->num_qid);
+ goto close_socket;
+ }
vui->vrings[msg.state.index].qsz_mask = msg.state.num - 1;
break;
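
/*
 * Why the power-of-2 requirement: with a power-of-2 ring size, qsz_mask
 * (num - 1) is a cheap wrap-around mask in the data path, e.g.
 * (illustrative):
 *
 *   desc = &vring->desc[vring->last_avail_idx & vring->qsz_mask];
 *
 * The ((num - 1) & num) test above is non-zero iff num has more than one
 * bit set, i.e. is not a power of 2.
 */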
vu_log_debug (vui, "if %d msg VHOST_USER_SET_VRING_ADDR idx %d",
vui->hw_if_index, msg.state.index);
- if (msg.state.index >= VHOST_VRING_MAX_N)
+ if (msg.state.index >= vui->num_qid)
{
vu_log_debug (vui, "invalid vring index VHOST_USER_SET_VRING_ADDR:"
- " %d >= %d", msg.state.index, VHOST_VRING_MAX_N);
+ " %u >= %u", msg.state.index, vui->num_qid);
goto close_socket;
}
goto close_socket;
}
+ vui->vrings[msg.state.index].desc_user_addr = msg.addr.desc_user_addr;
+ vui->vrings[msg.state.index].used_user_addr = msg.addr.used_user_addr;
+ vui->vrings[msg.state.index].avail_user_addr = msg.addr.avail_user_addr;
+
vlib_worker_thread_barrier_sync (vm);
vui->vrings[msg.state.index].desc = desc;
vui->vrings[msg.state.index].used = used;
/* Spec says: If VHOST_USER_F_PROTOCOL_FEATURES has not been negotiated,
the ring is initialized in an enabled state. */
- if (!(vui->features & (1 << FEAT_VHOST_USER_F_PROTOCOL_FEATURES)))
+ if (!(vui->features & VIRTIO_FEATURE (VHOST_USER_F_PROTOCOL_FEATURES)))
vui->vrings[msg.state.index].enabled = 1;
vui->vrings[msg.state.index].last_used_idx =
vui->vrings[msg.state.index].used->idx;
/* tell driver that we don't want interrupts */
- vui->vrings[msg.state.index].used->flags = VRING_USED_F_NO_NOTIFY;
+ if (vhost_user_is_packed_ring_supported (vui))
+ vui->vrings[msg.state.index].used_event->flags =
+ VRING_EVENT_F_DISABLE;
+ else
+ vui->vrings[msg.state.index].used->flags = VRING_USED_F_NO_NOTIFY;
vlib_worker_thread_barrier_release (vm);
vhost_user_update_iface_state (vui);
break;
vui->hw_if_index, msg.u64);
q = (u8) (msg.u64 & 0xFF);
-
- /* if there is old fd, delete and close it */
- if (vui->vrings[q].callfd_idx != ~0)
+ if (vui->num_qid > q)
{
- clib_file_t *uf = pool_elt_at_index (file_main.file_pool,
- vui->vrings[q].callfd_idx);
- clib_file_del (&file_main, uf);
- vui->vrings[q].callfd_idx = ~0;
+ /* if there is old fd, delete and close it */
+ if (vui->vrings[q].callfd_idx != ~0)
+ {
+ clib_file_t *uf = pool_elt_at_index (file_main.file_pool,
+ vui->vrings[q].callfd_idx);
+ clib_file_del (&file_main, uf);
+ vui->vrings[q].callfd_idx = ~0;
+ }
+ }
+ else if (vec_len (vui->vrings) > q)
+ {
+ /* grow vrings in pairs (RX + TX) */
+ vui->num_qid = (q & 1) ? (q + 1) : (q + 2);
+ }
+ else
+ {
+ u32 i, new_max_q, old_max_q = vec_len (vui->vrings);
+
+ /*
+ * Double the array size if it is less than 64 entries.
+ * Slow down thereafter.
+ */
+ if (vec_len (vui->vrings) < (VHOST_VRING_INIT_MQ_PAIR_SZ << 3))
+ new_max_q = vec_len (vui->vrings) << 1;
+ else
+ new_max_q = vec_len (vui->vrings) +
+ (VHOST_VRING_INIT_MQ_PAIR_SZ << 2);
+ if (new_max_q > (VHOST_VRING_MAX_MQ_PAIR_SZ << 1))
+ new_max_q = (VHOST_VRING_MAX_MQ_PAIR_SZ << 1);
+
+ /* sync with the worker threads, vrings may move due to realloc */
+ vlib_worker_thread_barrier_sync (vm);
+ vec_validate_aligned (vui->vrings, new_max_q - 1,
+ CLIB_CACHE_LINE_BYTES);
+ vlib_worker_thread_barrier_release (vm);
+
+ for (i = old_max_q; i < vec_len (vui->vrings); i++)
+ vhost_user_vring_init (vui, i);
+
+ /* grow vrings in pairs (RX + TX) */
+ vui->num_qid = (q & 1) ? (q + 1) : (q + 2);
}
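+
+ /*
+  * Worked example of the growth policy, assuming
+  * VHOST_VRING_INIT_MQ_PAIR_SZ is 8 and VHOST_VRING_MAX_MQ_PAIR_SZ is
+  * 128: the vring array starts at 16 entries and grows
+  * 16 -> 32 -> 64 (doubling below 64), then 96 -> 128 -> ... in steps
+  * of 32, capped at 256 entries (128 RX/TX queue pairs).
+  */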
if (!(msg.u64 & VHOST_USER_VRING_NOFD_MASK))
vui->hw_if_index, msg.u64);
q = (u8) (msg.u64 & 0xFF);
+ if (q >= vui->num_qid)
+ {
+ vu_log_debug (vui, "invalid vring index VHOST_USER_SET_VRING_KICK:"
+ " %u >= %u", q, vui->num_qid);
+ goto close_socket;
+ }
if (vui->vrings[q].kickfd_idx != ~0)
{
vui->hw_if_index, msg.u64);
q = (u8) (msg.u64 & 0xFF);
+ if (q >= vui->num_qid)
+ {
+ vu_log_debug (vui, "invalid vring index VHOST_USER_SET_VRING_ERR:"
+ " %u >= %u", q, vui->num_qid);
+ goto close_socket;
+ }
if (vui->vrings[q].errfd != -1)
close (vui->vrings[q].errfd);
break;
case VHOST_USER_SET_VRING_BASE:
- vu_log_debug (vui, "if %d msg VHOST_USER_SET_VRING_BASE idx %d num %d",
+ vu_log_debug (vui,
+ "if %d msg VHOST_USER_SET_VRING_BASE idx %d num 0x%x",
vui->hw_if_index, msg.state.index, msg.state.num);
+ if (msg.state.index >= vui->num_qid)
+ {
+ vu_log_debug (vui, "invalid vring index VHOST_USER_SET_VRING_ADDR:"
+ " %u >= %u", msg.state.index, vui->num_qid);
+ goto close_socket;
+ }
vlib_worker_thread_barrier_sync (vm);
vui->vrings[msg.state.index].last_avail_idx = msg.state.num;
+ if (vhost_user_is_packed_ring_supported (vui))
+ {
+ /*
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | last avail idx | | last used idx | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * ^ ^
+ * | |
+ * avail wrap counter used wrap counter
+ */
+ /* last avail idx at bits 0-14 */
+ vui->vrings[msg.state.index].last_avail_idx =
+ msg.state.num & 0x7fff;
+ /* avail wrap counter at bit 15 */
+ vui->vrings[msg.state.index].avail_wrap_counter =
+ ! !(msg.state.num & (1 << 15));
+
+ /*
+ * Although qemu's implementation passes last_used_idx in the upper
+ * 16 bits, in practice last_avail_idx and last_used_idx are usually
+ * the same, so DPDK does not bother to pass us last_used_idx. The
+ * spec is not clear on the encoding; I figured it out by reading the
+ * qemu code. So just read last_avail_idx and set last_used_idx equal
+ * to last_avail_idx.
+ */
+ vui->vrings[msg.state.index].last_used_idx =
+ vui->vrings[msg.state.index].last_avail_idx;
+ vui->vrings[msg.state.index].used_wrap_counter =
+ vui->vrings[msg.state.index].avail_wrap_counter;
+
+ if (vui->vrings[msg.state.index].avail_wrap_counter == 1)
+ vui->vrings[msg.state.index].avail_wrap_counter =
+ VRING_DESC_F_AVAIL;
+ }
vlib_worker_thread_barrier_release (vm);
break;
case VHOST_USER_GET_VRING_BASE:
- if (msg.state.index >= VHOST_VRING_MAX_N)
+ if (msg.state.index >= vui->num_qid)
{
vu_log_debug (vui, "invalid vring index VHOST_USER_GET_VRING_BASE:"
- " %d >= %d", msg.state.index, VHOST_VRING_MAX_N);
+ " %u >= %u", msg.state.index, vui->num_qid);
goto close_socket;
}
* closing the vring also initializes the vring last_avail_idx
*/
msg.state.num = vui->vrings[msg.state.index].last_avail_idx;
+ if (vhost_user_is_packed_ring_supported (vui))
+ {
+ msg.state.num =
+ (vui->vrings[msg.state.index].last_avail_idx & 0x7fff) |
+ (! !vui->vrings[msg.state.index].avail_wrap_counter << 15);
+ msg.state.num |=
+ ((vui->vrings[msg.state.index].last_used_idx & 0x7fff) |
+ (! !vui->vrings[msg.state.index].used_wrap_counter << 15)) << 16;
+ }
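+ /*
+  * Worked example: last_avail_idx 0x0234 with avail wrap counter 1 and
+  * last_used_idx 0x0234 with used wrap counter 1 encode as
+  * msg.state.num = 0x82348234.
+  */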
msg.flags |= 4;
msg.size = sizeof (msg.state);
*/
vhost_user_vring_close (vui, msg.state.index);
vlib_worker_thread_barrier_release (vm);
- vu_log_debug (vui, "if %d msg VHOST_USER_GET_VRING_BASE idx %d num %d",
+ vu_log_debug (vui,
+ "if %d msg VHOST_USER_GET_VRING_BASE idx %d num 0x%x",
vui->hw_if_index, msg.state.index, msg.state.num);
n =
send (uf->file_descriptor, &msg, VHOST_USER_MSG_HDR_SZ + msg.size, 0);
case VHOST_USER_GET_QUEUE_NUM:
msg.flags |= 4;
- msg.u64 = VHOST_VRING_MAX_N;
+ msg.u64 = VHOST_VRING_MAX_MQ_PAIR_SZ;
msg.size = sizeof (msg.u64);
vu_log_debug (vui, "if %d msg VHOST_USER_GET_QUEUE_NUM - reply %d",
vui->hw_if_index, msg.u64);
vu_log_debug (vui, "if %d VHOST_USER_SET_VRING_ENABLE: %s queue %d",
vui->hw_if_index, msg.state.num ? "enable" : "disable",
msg.state.index);
- if (msg.state.index >= VHOST_VRING_MAX_N)
+ if (msg.state.index >= vui->num_qid)
{
vu_log_debug (vui, "invalid vring idx VHOST_USER_SET_VRING_ENABLE:"
- " %d >= %d", msg.state.index, VHOST_VRING_MAX_N);
+ " %u >= %u", msg.state.index, vui->num_qid);
goto close_socket;
}
template.file_descriptor = client_fd;
template.private_data = vui - vhost_user_main.vhost_user_interfaces;
vui->clib_file_index = clib_file_add (&file_main, &template);
+ vui->num_qid = 2;
return 0;
}
case ~0:
/* *INDENT-OFF* */
- pool_foreach (vui, vum->vhost_user_interfaces, {
+ pool_foreach (vui, vum->vhost_user_interfaces) {
next_timeout = timeout;
- for (qid = 0; qid < VHOST_VRING_MAX_N / 2; qid += 2)
+ for (qid = 0; qid < vui->num_qid / 2; qid += 2)
{
vhost_user_vring_t *rxvq = &vui->vrings[qid];
vhost_user_vring_t *txvq = &vui->vrings[qid + 1];
if ((next_timeout < timeout) && (next_timeout > 0.0))
timeout = next_timeout;
}
- });
+ }
/* *INDENT-ON* */
break;
timeout = 3.0;
/* *INDENT-OFF* */
- pool_foreach (vui, vum->vhost_user_interfaces, {
+ pool_foreach (vui, vum->vhost_user_interfaces) {
if (vui->unix_server_index == ~0) { //Nothing to do for server sockets
if (vui->clib_file_index == ~0)
/* try to connect */
strncpy (sun.sun_path, (char *) vui->sock_filename,
sizeof (sun.sun_path) - 1);
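+ /* strncpy does not NUL-terminate when the source fills the buffer,
+    so terminate explicitly */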
+ sun.sun_path[sizeof (sun.sun_path) - 1] = 0;
/* Avoid hanging VPP if the other end does not accept */
if (fcntl(sockfd, F_SETFL, O_NONBLOCK) < 0)
template.private_data =
vui - vhost_user_main.vhost_user_interfaces;
vui->clib_file_index = clib_file_add (&file_main, &template);
+ vui->num_qid = 2;
/* This sockfd is considered consumed */
sockfd = -1;
}
}
}
- });
+ }
/* *INDENT-ON* */
}
return 0;
// disconnect interface sockets
vhost_user_if_disconnect (vui);
+ vhost_user_update_gso_interface_count (vui, 0 /* delete */ );
vhost_user_update_iface_state (vui);
- for (q = 0; q < VHOST_VRING_MAX_N; q++)
+ for (q = 0; q < vui->num_qid; q++)
{
// Remove existing queue mapping for the interface
if (q & 1)
}
}
- clib_mem_free ((void *) vui->vring_locks[q]);
+ clib_spinlock_free (&vui->vrings[q].vring_lock);
}
if (vui->unix_server_index != ~0)
vu_log_debug (vui, "Deleting vhost-user interface %s (instance %d)",
hwif->name, hwif->dev_instance);
- for (qid = 1; qid < VHOST_VRING_MAX_N / 2; qid += 2)
+ for (qid = 1; qid < vui->num_qid / 2; qid += 2)
{
vhost_user_vring_t *txvq = &vui->vrings[qid];
if (txvq->qid == -1)
continue;
if ((vum->ifq_count > 0) &&
- ((txvq->mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
- (txvq->mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE)))
+ ((txvq->mode == VNET_HW_IF_RX_MODE_INTERRUPT) ||
+ (txvq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE)))
{
vum->ifq_count--;
// Stop the timer if there is no more interrupt interface/queue
// Delete ethernet interface
ethernet_delete_interface (vnm, vui->hw_if_index);
+ // free vrings
+ vec_free (vui->vrings);
+
// Back to pool
pool_put (vum->vhost_user_interfaces, vui);
vlib_worker_thread_barrier_sync (vlib_get_main ());
/* *INDENT-OFF* */
- pool_foreach (vui, vum->vhost_user_interfaces, {
+ pool_foreach (vui, vum->vhost_user_interfaces) {
vhost_user_delete_if (vnm, vm, vui->sw_if_index);
- });
+ }
/* *INDENT-ON* */
vlib_worker_thread_barrier_release (vlib_get_main ());
return 0;
vhost_user_intf_t * vui,
int server_sock_fd,
const char *sock_filename,
- u64 feature_mask, u32 * sw_if_index)
+ u64 feature_mask, u32 * sw_if_index, u8 enable_gso,
+ u8 enable_packed)
{
vnet_sw_interface_t *sw;
int q;
vui->clib_file_index = ~0;
vui->log_base_addr = 0;
vui->if_index = vui - vum->vhost_user_interfaces;
+ vui->enable_gso = enable_gso;
+ vui->enable_packed = enable_packed;
+ /*
+  * enable_gso takes precedence over the configurable feature mask if
+  * there is a clash:
+  *   - if the feature mask disables GSO but enable_gso is configured,
+  *     GSO is enabled;
+  *   - if the feature mask enables GSO but enable_gso is not configured,
+  *     GSO is enabled.
+  *
+  * If GSO is enabled via the feature mask, both the host- and guest-side
+  * GSO feature bits must be set; we don't support one-sided or partial
+  * GSO.
+  */
+ if ((vui->enable_gso == 0) &&
+ ((feature_mask & FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS) ==
+ (FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS)))
+ vui->enable_gso = 1;
+ vhost_user_update_gso_interface_count (vui, 1 /* add */ );
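+ /*
+  * FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS is assumed to OR
+  * together the host- and guest-side TSO/checksum feature bits, roughly:
+  *
+  *   (VIRTIO_FEATURE (VIRTIO_NET_F_CSUM) |
+  *    VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_CSUM) |
+  *    VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_TSO4) |
+  *    VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_TSO6) |
+  *    VIRTIO_FEATURE (VIRTIO_NET_F_HOST_TSO4) |
+  *    VIRTIO_FEATURE (VIRTIO_NET_F_HOST_TSO6))
+  */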
mhash_set_mem (&vum->if_index_by_sock_name, vui->sock_filename,
&vui->if_index, 0);
- for (q = 0; q < VHOST_VRING_MAX_N; q++)
+ vec_validate_aligned (vui->vrings, (VHOST_VRING_INIT_MQ_PAIR_SZ << 1) - 1,
+ CLIB_CACHE_LINE_BYTES);
+ vui->num_qid = 2;
+ for (q = 0; q < vec_len (vui->vrings); q++)
vhost_user_vring_init (vui, q);
hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
if (sw_if_index)
*sw_if_index = vui->sw_if_index;
- for (q = 0; q < VHOST_VRING_MAX_N; q++)
- {
- vui->vring_locks[q] = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
- CLIB_CACHE_LINE_BYTES);
- clib_memset ((void *) vui->vring_locks[q], 0, CLIB_CACHE_LINE_BYTES);
- }
-
vec_validate (vui->per_cpu_tx_qid,
vlib_get_thread_main ()->n_vlib_mains - 1);
vhost_user_tx_thread_placement (vui);
u8 is_server,
u32 * sw_if_index,
u64 feature_mask,
- u8 renumber, u32 custom_dev_instance, u8 * hwaddr)
+ u8 renumber, u32 custom_dev_instance, u8 * hwaddr,
+ u8 enable_gso, u8 enable_packed)
{
vhost_user_intf_t *vui = NULL;
u32 sw_if_idx = ~0;
vlib_worker_thread_barrier_release (vm);
vhost_user_vui_init (vnm, vui, server_sock_fd, sock_filename,
- feature_mask, &sw_if_idx);
+ feature_mask, &sw_if_idx, enable_gso, enable_packed);
vnet_sw_interface_set_mtu (vnm, vui->sw_if_index, 9000);
vhost_user_rx_thread_placement (vui, 1);
const char *sock_filename,
u8 is_server,
u32 sw_if_index,
- u64 feature_mask, u8 renumber, u32 custom_dev_instance)
+ u64 feature_mask, u8 renumber, u32 custom_dev_instance,
+ u8 enable_gso, u8 enable_packed)
{
vhost_user_main_t *vum = &vhost_user_main;
vhost_user_intf_t *vui = NULL;
vhost_user_term_if (vui);
vhost_user_vui_init (vnm, vui, server_sock_fd,
- sock_filename, feature_mask, &sw_if_idx);
+ sock_filename, feature_mask, &sw_if_idx, enable_gso,
+ enable_packed);
if (renumber)
vnet_interface_name_renumber (sw_if_idx, custom_dev_instance);
u8 hwaddr[6];
u8 *hw = NULL;
clib_error_t *error = NULL;
+ u8 enable_gso = 0, enable_packed = 0;
/* Get a line of input. */
if (!unformat_user (input, unformat_line_input, line_input))
return 0;
+ /* The GSO feature is disabled by default */
+ feature_mask &= ~FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS;
+ /* The packed-ring feature is disabled by default */
+ feature_mask &= ~VIRTIO_FEATURE (VIRTIO_F_RING_PACKED);
while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (line_input, "socket %s", &sock_filename))
;
else if (unformat (line_input, "server"))
is_server = 1;
+ else if (unformat (line_input, "gso"))
+ enable_gso = 1;
+ else if (unformat (line_input, "packed"))
+ enable_packed = 1;
else if (unformat (line_input, "feature-mask 0x%llx", &feature_mask))
;
else
int rv;
if ((rv = vhost_user_create_if (vnm, vm, (char *) sock_filename,
is_server, &sw_if_index, feature_mask,
- renumber, custom_dev_instance, hw)))
+ renumber, custom_dev_instance, hw,
+ enable_gso, enable_packed)))
{
error = clib_error_return (0, "vhost_user_create_if returned %d", rv);
goto done;
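
/*
 * Example CLI usage with the new flags (interface name illustrative):
 *
 *   vpp# create vhost-user socket /tmp/vhost1.sock server gso packed
 *   VirtualEthernet0/0/0
 */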
vhost_user_intf_details_t *vuid = NULL;
u32 *hw_if_indices = 0;
vnet_hw_interface_t *hi;
- u8 *s = NULL;
int i;
if (!out_vuids)
return -1;
- pool_foreach (vui, vum->vhost_user_interfaces,
- vec_add1 (hw_if_indices, vui->hw_if_index);
- );
+ pool_foreach (vui, vum->vhost_user_interfaces)
+ vec_add1 (hw_if_indices, vui->hw_if_index);
for (i = 0; i < vec_len (hw_if_indices); i++)
{
vuid->num_regions = vui->nregions;
vuid->is_server = vui->unix_server_index != ~0;
vuid->sock_errno = vui->sock_errno;
- strncpy ((char *) vuid->sock_filename, (char *) vui->sock_filename,
- sizeof (vuid->sock_filename));
- vuid->sock_filename[ARRAY_LEN (vuid->sock_filename) - 1] = '\0';
- s = format (s, "%v%c", hi->name, 0);
-
- strncpy ((char *) vuid->if_name, (char *) s,
- ARRAY_LEN (vuid->if_name) - 1);
- _vec_len (s) = 0;
+ snprintf ((char *) vuid->sock_filename, sizeof (vuid->sock_filename),
+ "%s", vui->sock_filename);
+ memcpy_s (vuid->if_name, sizeof (vuid->if_name), hi->name,
+ clib_min (vec_len (hi->name), sizeof (vuid->if_name) - 1));
+ vuid->if_name[sizeof (vuid->if_name) - 1] = 0;
}
- vec_free (s);
vec_free (hw_if_indices);
*out_vuids = r_vuids;
return rv;
}
+static u8 *
+format_vhost_user_desc (u8 * s, va_list * args)
+{
+ char *fmt = va_arg (*args, char *);
+ vhost_user_intf_t *vui = va_arg (*args, vhost_user_intf_t *);
+ vring_desc_t *desc_table = va_arg (*args, vring_desc_t *);
+ int idx = va_arg (*args, int);
+ u32 *mem_hint = va_arg (*args, u32 *);
+
+ s = format (s, fmt, idx, desc_table[idx].addr, desc_table[idx].len,
+ desc_table[idx].flags, desc_table[idx].next,
+ pointer_to_uword (map_guest_mem (vui, desc_table[idx].addr,
+ mem_hint)));
+ return s;
+}
+
+static u8 *
+format_vhost_user_vring (u8 * s, va_list * args)
+{
+ char *fmt = va_arg (*args, char *);
+ vhost_user_intf_t *vui = va_arg (*args, vhost_user_intf_t *);
+ int q = va_arg (*args, int);
+
+ s = format (s, fmt, vui->vrings[q].avail->flags, vui->vrings[q].avail->idx,
+ vui->vrings[q].used->flags, vui->vrings[q].used->idx);
+ return s;
+}
+
+static void
+vhost_user_show_fds (vlib_main_t * vm, vhost_user_intf_t * vui, int q)
+{
+ int kickfd = UNIX_GET_FD (vui->vrings[q].kickfd_idx);
+ int callfd = UNIX_GET_FD (vui->vrings[q].callfd_idx);
+
+ vlib_cli_output (vm, " kickfd %d callfd %d errfd %d\n", kickfd, callfd,
+ vui->vrings[q].errfd);
+}
+
+static void
+vhost_user_show_desc (vlib_main_t * vm, vhost_user_intf_t * vui, int q,
+ int show_descr, int show_verbose)
+{
+ int j;
+ u32 mem_hint = 0;
+ u32 idx;
+ u32 n_entries;
+ vring_desc_t *desc_table;
+
+ if (vui->vrings[q].avail && vui->vrings[q].used)
+ vlib_cli_output (vm, "%U", format_vhost_user_vring,
+ " avail.flags %x avail.idx %d used.flags %x used.idx %d\n",
+ vui, q);
+
+ vhost_user_show_fds (vm, vui, q);
+
+ if (show_descr)
+ {
+ vlib_cli_output (vm, "\n descriptor table:\n");
+ vlib_cli_output (vm,
+ " slot addr len flags next "
+ "user_addr\n");
+ vlib_cli_output (vm,
+ " ===== ================== ===== ====== ===== "
+ "==================\n");
+ for (j = 0; j < vui->vrings[q].qsz_mask + 1; j++)
+ {
+ desc_table = vui->vrings[q].desc;
+ vlib_cli_output (vm, "%U", format_vhost_user_desc,
+ " %-5d 0x%016lx %-5d 0x%04x %-5d 0x%016lx\n", vui,
+ desc_table, j, &mem_hint);
+ if (show_verbose && (desc_table[j].flags & VRING_DESC_F_INDIRECT))
+ {
+ n_entries = desc_table[j].len / sizeof (vring_desc_t);
+ desc_table = map_guest_mem (vui, desc_table[j].addr, &mem_hint);
+ if (desc_table)
+ {
+ for (idx = 0; idx < clib_min (20, n_entries); idx++)
+ {
+ vlib_cli_output
+ (vm, "%U", format_vhost_user_desc,
+ "> %-4u 0x%016lx %-5u 0x%04x %-5u 0x%016lx\n", vui,
+ desc_table, idx, &mem_hint);
+ }
+ if (n_entries >= 20)
+ vlib_cli_output (vm, "Skip displaying entries 20...%u\n",
+ n_entries);
+ }
+ }
+ }
+ }
+}
+
+static u8 *
+format_vhost_user_packed_desc (u8 * s, va_list * args)
+{
+ char *fmt = va_arg (*args, char *);
+ vhost_user_intf_t *vui = va_arg (*args, vhost_user_intf_t *);
+ vring_packed_desc_t *desc_table = va_arg (*args, vring_packed_desc_t *);
+ int idx = va_arg (*args, int);
+ u32 *mem_hint = va_arg (*args, u32 *);
+
+ s = format (s, fmt, idx, desc_table[idx].addr, desc_table[idx].len,
+ desc_table[idx].flags, desc_table[idx].id,
+ pointer_to_uword (map_guest_mem (vui, desc_table[idx].addr,
+ mem_hint)));
+ return s;
+}
+
+static u8 *
+format_vhost_user_vring_packed (u8 * s, va_list * args)
+{
+ char *fmt = va_arg (*args, char *);
+ vhost_user_intf_t *vui = va_arg (*args, vhost_user_intf_t *);
+ int q = va_arg (*args, int);
+
+ s = format (s, fmt, vui->vrings[q].avail_event->flags,
+ vui->vrings[q].avail_event->off_wrap,
+ vui->vrings[q].used_event->flags,
+ vui->vrings[q].used_event->off_wrap,
+ vui->vrings[q].avail_wrap_counter,
+ vui->vrings[q].used_wrap_counter);
+ return s;
+}
+
+static void
+vhost_user_show_desc_packed (vlib_main_t * vm, vhost_user_intf_t * vui, int q,
+ int show_descr, int show_verbose)
+{
+ int j;
+ u32 mem_hint = 0;
+ u32 idx;
+ u32 n_entries;
+ vring_packed_desc_t *desc_table;
+
+ if (vui->vrings[q].avail_event && vui->vrings[q].used_event)
+ vlib_cli_output (vm, "%U", format_vhost_user_vring_packed,
+ " avail_event.flags %x avail_event.off_wrap %u "
+ "used_event.flags %x used_event.off_wrap %u\n"
+ " avail wrap counter %u, used wrap counter %u\n",
+ vui, q);
+
+ vhost_user_show_fds (vm, vui, q);
+
+ if (show_descr)
+ {
+ vlib_cli_output (vm, "\n descriptor table:\n");
+ vlib_cli_output (vm,
+ " slot addr len flags id "
+ "user_addr\n");
+ vlib_cli_output (vm,
+ " ===== ================== ===== ====== ===== "
+ "==================\n");
+ for (j = 0; j < vui->vrings[q].qsz_mask + 1; j++)
+ {
+ desc_table = vui->vrings[q].packed_desc;
+ vlib_cli_output (vm, "%U", format_vhost_user_packed_desc,
+ " %-5u 0x%016lx %-5u 0x%04x %-5u 0x%016lx\n", vui,
+ desc_table, j, &mem_hint);
+ if (show_verbose && (desc_table[j].flags & VRING_DESC_F_INDIRECT))
+ {
+ n_entries = desc_table[j].len >> 4;
+ desc_table = map_guest_mem (vui, desc_table[j].addr, &mem_hint);
+ if (desc_table)
+ {
+ for (idx = 0; idx < clib_min (20, n_entries); idx++)
+ {
+ vlib_cli_output
+ (vm, "%U", format_vhost_user_packed_desc,
+ "> %-4u 0x%016lx %-5u 0x%04x %-5u 0x%016lx\n", vui,
+ desc_table, idx, &mem_hint);
+ }
+ if (n_entries >= 20)
+ vlib_cli_output (vm, "Skip displaying entries 20...%u\n",
+ n_entries);
+ }
+ }
+ }
+ }
+}
+
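+ /*
+  * Example invocation of the dump above (CLI output illustrative):
+  *
+  *   vpp# show vhost-user VirtualEthernet0/0/0 descriptors verbose
+  */
+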
clib_error_t *
show_vhost_user_command_fn (vlib_main_t * vm,
unformat_input_t * input,
u32 ci;
int i, j, q;
int show_descr = 0;
+ int show_verbose = 0;
struct feat_struct
{
u8 bit;
static struct feat_struct feat_array[] = {
#define _(s,b) { .str = #s, .bit = b, },
- foreach_virtio_net_feature
+ foreach_virtio_net_features
#undef _
{.str = NULL}
};
}
else if (unformat (input, "descriptors") || unformat (input, "desc"))
show_descr = 1;
+ else if (unformat (input, "verbose"))
+ show_verbose = 1;
else
{
error = clib_error_return (0, "unknown input `%U'",
}
if (vec_len (hw_if_indices) == 0)
{
- pool_foreach (vui, vum->vhost_user_interfaces,
- vec_add1 (hw_if_indices, vui->hw_if_index);
- );
+ pool_foreach (vui, vum->vhost_user_interfaces)
+ vec_add1 (hw_if_indices, vui->hw_if_index);
}
vlib_cli_output (vm, "Virtio vhost-user interfaces");
vlib_cli_output (vm, "Global:\n coalesce frames %d time %e",
vum->coalesce_frames, vum->coalesce_time);
- vlib_cli_output (vm, " number of rx virtqueues in interrupt mode: %d",
+ vlib_cli_output (vm, " Number of rx virtqueues in interrupt mode: %d",
vum->ifq_count);
+ vlib_cli_output (vm, " Number of GSO interfaces: %d", vum->gso_count);
for (i = 0; i < vec_len (hw_if_indices); i++)
{
vlib_cli_output (vm, "Interface: %U (ifindex %d)",
format_vnet_hw_if_index_name, vnm, hw_if_indices[i],
hw_if_indices[i]);
+ vlib_cli_output (vm, " Number of qids %u", vui->num_qid);
+ if (vui->enable_gso)
+ vlib_cli_output (vm, "  GSO enabled");
+ if (vui->enable_packed)
+ vlib_cli_output (vm, "  Packed ring enabled");
vlib_cli_output (vm, "virtio_net_hdr_sz %d\n"
" features mask (0x%llx): \n"
vlib_cli_output (vm, " rx placement: ");
- for (qid = 1; qid < VHOST_VRING_MAX_N / 2; qid += 2)
+ for (qid = 1; qid < vui->num_qid / 2; qid += 2)
{
vnet_main_t *vnm = vnet_get_main ();
uword thread_index;
- vnet_hw_interface_rx_mode mode;
+ vnet_hw_if_rx_mode mode;
vhost_user_vring_t *txvq = &vui->vrings[qid];
if (txvq->qid == -1)
&mode);
vlib_cli_output (vm, " thread %d on vring %d, %U\n",
thread_index, qid,
- format_vnet_hw_interface_rx_mode, mode);
+ format_vnet_hw_if_rx_mode, mode);
}
vlib_cli_output (vm, " tx placement: %s\n",
vui->regions[j].mmap_offset,
pointer_to_uword (vui->region_mmap_addr[j]));
}
- for (q = 0; q < VHOST_VRING_MAX_N; q++)
+ for (q = 0; q < vui->num_qid; q++)
{
if (!vui->vrings[q].started)
continue;
vui->vrings[q].last_avail_idx,
vui->vrings[q].last_used_idx);
- if (vui->vrings[q].avail && vui->vrings[q].used)
- vlib_cli_output (vm,
- " avail.flags %x avail.idx %d used.flags %x used.idx %d\n",
- vui->vrings[q].avail->flags,
- vui->vrings[q].avail->idx,
- vui->vrings[q].used->flags,
- vui->vrings[q].used->idx);
-
- int kickfd = UNIX_GET_FD (vui->vrings[q].kickfd_idx);
- int callfd = UNIX_GET_FD (vui->vrings[q].callfd_idx);
- vlib_cli_output (vm, " kickfd %d callfd %d errfd %d\n",
- kickfd, callfd, vui->vrings[q].errfd);
-
- if (show_descr)
- {
- vlib_cli_output (vm, "\n descriptor table:\n");
- vlib_cli_output (vm,
- " id addr len flags next user_addr\n");
- vlib_cli_output (vm,
- " ===== ================== ===== ====== ===== ==================\n");
- for (j = 0; j < vui->vrings[q].qsz_mask + 1; j++)
- {
- u32 mem_hint = 0;
- vlib_cli_output (vm,
- " %-5d 0x%016lx %-5d 0x%04x %-5d 0x%016lx\n",
- j, vui->vrings[q].desc[j].addr,
- vui->vrings[q].desc[j].len,
- vui->vrings[q].desc[j].flags,
- vui->vrings[q].desc[j].next,
- pointer_to_uword (map_guest_mem
- (vui,
- vui->vrings[q].desc[j].
- addr, &mem_hint)));
- }
- }
+ if (vhost_user_is_packed_ring_supported (vui))
+ vhost_user_show_desc_packed (vm, vui, q, show_descr,
+ show_verbose);
+ else
+ vhost_user_show_desc (vm, vui, q, show_descr, show_verbose);
}
vlib_cli_output (vm, "\n");
}
VLIB_CLI_COMMAND (vhost_user_connect_command, static) = {
.path = "create vhost-user",
.short_help = "create vhost-user socket <socket-filename> [server] "
- "[feature-mask <hex>] [hwaddr <mac-addr>] [renumber <dev_instance>] ",
+ "[feature-mask <hex>] [hwaddr <mac-addr>] [renumber <dev_instance>] [gso] "
+ "[packed]",
.function = vhost_user_connect_command_fn,
.is_mp_safe = 1,
};
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_vhost_user_command, static) = {
.path = "show vhost-user",
- .short_help = "show vhost-user [<interface> [<interface> [..]]] [descriptors]",
+ .short_help = "show vhost-user [<interface> [<interface> [..]]] "
+ "[[descriptors] [verbose]]",
.function = show_vhost_user_command_fn,
};
/* *INDENT-ON* */
if (vum->dont_dump_vhost_user_memory)
{
- pool_foreach (vui, vum->vhost_user_interfaces,
- unmap_all_mem_regions (vui);
- );
+ pool_foreach (vui, vum->vhost_user_interfaces)
+ unmap_all_mem_regions (vui);
}
}