{
case VHOST_USER_GET_FEATURES:
msg.flags |= 4;
- msg.u64 = (1ULL << FEAT_VIRTIO_NET_F_MRG_RXBUF) |
- (1ULL << FEAT_VIRTIO_NET_F_CTRL_VQ) |
- (1ULL << FEAT_VIRTIO_F_ANY_LAYOUT) |
- (1ULL << FEAT_VIRTIO_F_INDIRECT_DESC) |
- (1ULL << FEAT_VHOST_F_LOG_ALL) |
- (1ULL << FEAT_VIRTIO_NET_F_GUEST_ANNOUNCE) |
- (1ULL << FEAT_VIRTIO_NET_F_MQ) |
- (1ULL << FEAT_VHOST_USER_F_PROTOCOL_FEATURES) |
- (1ULL << FEAT_VIRTIO_F_VERSION_1);
+ msg.u64 = VIRTIO_FEATURE (VIRTIO_NET_F_MRG_RXBUF) |
+ VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_VQ) |
+ VIRTIO_FEATURE (VIRTIO_F_ANY_LAYOUT) |
+ VIRTIO_FEATURE (VIRTIO_RING_F_INDIRECT_DESC) |
+ VIRTIO_FEATURE (VHOST_F_LOG_ALL) |
+ VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_ANNOUNCE) |
+ VIRTIO_FEATURE (VIRTIO_NET_F_MQ) |
+ VIRTIO_FEATURE (VHOST_USER_F_PROTOCOL_FEATURES) |
+ VIRTIO_FEATURE (VIRTIO_F_VERSION_1);
msg.u64 &= vui->feature_mask;
+
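+      /* Only advertise the GSO/csum and packed-ring features when they are
+         enabled on this interface */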
+ if (vui->enable_gso)
+ msg.u64 |= FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS;
+ if (vui->enable_packed)
+ msg.u64 |= VIRTIO_FEATURE (VIRTIO_F_RING_PACKED);
+
msg.size = sizeof (msg.u64);
vu_log_debug (vui, "if %d msg VHOST_USER_GET_FEATURES - reply "
"0x%016llx", vui->hw_if_index, msg.u64);
vui->features = msg.u64;
if (vui->features &
- ((1 << FEAT_VIRTIO_NET_F_MRG_RXBUF) |
- (1ULL << FEAT_VIRTIO_F_VERSION_1)))
+ (VIRTIO_FEATURE (VIRTIO_NET_F_MRG_RXBUF) |
+ VIRTIO_FEATURE (VIRTIO_F_VERSION_1)))
vui->virtio_net_hdr_sz = 12;
else
vui->virtio_net_hdr_sz = 10;
vui->is_any_layout =
- (vui->features & (1 << FEAT_VIRTIO_F_ANY_LAYOUT)) ? 1 : 0;
+ (vui->features & VIRTIO_FEATURE (VIRTIO_F_ANY_LAYOUT)) ? 1 : 0;
ASSERT (vui->virtio_net_hdr_sz < VLIB_BUFFER_PRE_DATA_SIZE);
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vui->hw_if_index);
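+      /*
+       * Set the hw interface GSO/checksum-offload flags only when gso is
+       * configured and all host and guest TSO feature bits are present;
+       * clear them otherwise.
+       */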
+ if (vui->enable_gso &&
+ ((vui->features & FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS)
+ == FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS))
+ hw->flags |=
+ (VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO |
+ VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD);
+ else
+ hw->flags &= ~(VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO |
+ VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD);
vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
vui->is_ready = 0;
+ vhost_user_update_iface_state (vui);
break;
case VHOST_USER_SET_MEM_TABLE:
vui->nregions++;
}
+
+ /*
+   * Re-compute the desc, used, and avail descriptor table pointers if the
+   * vring addresses are set.
+ */
+ for (q = 0; q < VHOST_VRING_MAX_N; q++)
+ {
+ if (vui->vrings[q].desc_user_addr &&
+ vui->vrings[q].used_user_addr && vui->vrings[q].avail_user_addr)
+ {
+ vui->vrings[q].desc =
+ map_user_mem (vui, vui->vrings[q].desc_user_addr);
+ vui->vrings[q].used =
+ map_user_mem (vui, vui->vrings[q].used_user_addr);
+ vui->vrings[q].avail =
+ map_user_mem (vui, vui->vrings[q].avail_user_addr);
+ }
+ }
vlib_worker_thread_barrier_release (vm);
break;
goto close_socket;
}
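+      /*
+       * Save the guest vring addresses so the desc, used, and avail tables
+       * can be re-mapped when a new VHOST_USER_SET_MEM_TABLE arrives.
+       */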
+ vui->vrings[msg.state.index].desc_user_addr = msg.addr.desc_user_addr;
+ vui->vrings[msg.state.index].used_user_addr = msg.addr.used_user_addr;
+ vui->vrings[msg.state.index].avail_user_addr = msg.addr.avail_user_addr;
+
vlib_worker_thread_barrier_sync (vm);
vui->vrings[msg.state.index].desc = desc;
vui->vrings[msg.state.index].used = used;
/* Spec says: If VHOST_USER_F_PROTOCOL_FEATURES has not been negotiated,
the ring is initialized in an enabled state. */
- if (!(vui->features & (1 << FEAT_VHOST_USER_F_PROTOCOL_FEATURES)))
+ if (!(vui->features & VIRTIO_FEATURE (VHOST_USER_F_PROTOCOL_FEATURES)))
vui->vrings[msg.state.index].enabled = 1;
vui->vrings[msg.state.index].last_used_idx =
vui->vrings[msg.state.index].used->idx;
/* tell driver that we don't want interrupts */
- vui->vrings[msg.state.index].used->flags = VRING_USED_F_NO_NOTIFY;
+ if (vhost_user_is_packed_ring_supported (vui))
+ vui->vrings[msg.state.index].used_event->flags =
+ VRING_EVENT_F_DISABLE;
+ else
+ vui->vrings[msg.state.index].used->flags = VRING_USED_F_NO_NOTIFY;
vlib_worker_thread_barrier_release (vm);
vhost_user_update_iface_state (vui);
break;
break;
case VHOST_USER_SET_VRING_BASE:
- vu_log_debug (vui, "if %d msg VHOST_USER_SET_VRING_BASE idx %d num %d",
+ vu_log_debug (vui,
+ "if %d msg VHOST_USER_SET_VRING_BASE idx %d num 0x%x",
vui->hw_if_index, msg.state.index, msg.state.num);
vlib_worker_thread_barrier_sync (vm);
vui->vrings[msg.state.index].last_avail_idx = msg.state.num;
+ if (vhost_user_is_packed_ring_supported (vui))
+ {
+ /*
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | last avail idx | | last used idx | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * ^ ^
+ * | |
+ * avail wrap counter used wrap counter
+ */
+ /* last avail idx at bit 0-14. */
+ vui->vrings[msg.state.index].last_avail_idx =
+ msg.state.num & 0x7fff;
+ /* avail wrap counter at bit 15 */
+ vui->vrings[msg.state.index].avail_wrap_counter =
+ ! !(msg.state.num & (1 << 15));
+
+ /*
+ * Although last_used_idx is passed in the upper 16 bits in qemu
+ * implementation, in practice, last_avail_idx and last_used_idx are
+ * usually the same. As a result, DPDK does not bother to pass us
+	   * last_used_idx. The spec is not clear on the encoding. I figured it
+	   * out by reading the qemu code. So let's just read last_avail_idx
+	   * and set last_used_idx equal to last_avail_idx.
+ */
+ vui->vrings[msg.state.index].last_used_idx =
+ vui->vrings[msg.state.index].last_avail_idx;
+ vui->vrings[msg.state.index].used_wrap_counter =
+ vui->vrings[msg.state.index].avail_wrap_counter;
+
+ if (vui->vrings[msg.state.index].avail_wrap_counter == 1)
+ vui->vrings[msg.state.index].avail_wrap_counter =
+ VRING_DESC_F_AVAIL;
+ }
vlib_worker_thread_barrier_release (vm);
break;
* closing the vring also initializes the vring last_avail_idx
*/
msg.state.num = vui->vrings[msg.state.index].last_avail_idx;
+ if (vhost_user_is_packed_ring_supported (vui))
+ {
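+	  /*
+	   * Pack last_avail_idx and its wrap counter in the low 16 bits and
+	   * last_used_idx and its wrap counter in the high 16 bits, mirroring
+	   * the layout decoded in VHOST_USER_SET_VRING_BASE.
+	   */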
+ msg.state.num =
+ (vui->vrings[msg.state.index].last_avail_idx & 0x7fff) |
+ (! !vui->vrings[msg.state.index].avail_wrap_counter << 15);
+ msg.state.num |=
+ ((vui->vrings[msg.state.index].last_used_idx & 0x7fff) |
+ (! !vui->vrings[msg.state.index].used_wrap_counter << 15)) << 16;
+ }
msg.flags |= 4;
msg.size = sizeof (msg.state);
*/
vhost_user_vring_close (vui, msg.state.index);
vlib_worker_thread_barrier_release (vm);
- vu_log_debug (vui, "if %d msg VHOST_USER_GET_VRING_BASE idx %d num %d",
+ vu_log_debug (vui,
+ "if %d msg VHOST_USER_GET_VRING_BASE idx %d num 0x%x",
vui->hw_if_index, msg.state.index, msg.state.num);
n =
send (uf->file_descriptor, &msg, VHOST_USER_MSG_HDR_SZ + msg.size, 0);
static clib_error_t *
vhost_user_init (vlib_main_t * vm)
{
- clib_error_t *error;
vhost_user_main_t *vum = &vhost_user_main;
vlib_thread_main_t *tm = vlib_get_thread_main ();
- error = vlib_call_init_function (vm, ip4_init);
- if (error)
- return error;
-
vum->log_default = vlib_log_register_class ("vhost-user", 0);
vum->coalesce_frames = 32;
return 0;
}
-VLIB_INIT_FUNCTION (vhost_user_init);
+/* *INDENT-OFF* */
+VLIB_INIT_FUNCTION (vhost_user_init) =
+{
+ .runs_after = VLIB_INITS("ip4_init"),
+};
+/* *INDENT-ON* */
static uword
vhost_user_send_interrupt_process (vlib_main_t * vm,
// disconnect interface sockets
vhost_user_if_disconnect (vui);
+ vhost_user_update_gso_interface_count (vui, 0 /* delete */ );
vhost_user_update_iface_state (vui);
for (q = 0; q < VHOST_VRING_MAX_N; q++)
vnet_hw_interface_t *hwif;
u16 qid;
- if (!(hwif = vnet_get_sup_hw_interface (vnm, sw_if_index)) ||
- hwif->dev_class_index != vhost_user_device_class.index)
+ if (!
+ (hwif =
+ vnet_get_sup_hw_interface_api_visible_or_null (vnm, sw_if_index))
+ || hwif->dev_class_index != vhost_user_device_class.index)
return VNET_API_ERROR_INVALID_SW_IF_INDEX;
vui = pool_elt_at_index (vum->vhost_user_interfaces, hwif->dev_instance);
vhost_user_intf_t * vui,
int server_sock_fd,
const char *sock_filename,
- u64 feature_mask, u32 * sw_if_index)
+ u64 feature_mask, u32 * sw_if_index, u8 enable_gso,
+ u8 enable_packed)
{
vnet_sw_interface_t *sw;
int q;
vui->clib_file_index = ~0;
vui->log_base_addr = 0;
vui->if_index = vui - vum->vhost_user_interfaces;
+ vui->enable_gso = enable_gso;
+ vui->enable_packed = enable_packed;
+ /*
+   * enable_gso takes precedence over the configurable feature mask if
+   * there is a clash:
+   *   if the feature mask disables gso but enable_gso is configured,
+   *   gso is enabled;
+   *   if the feature mask enables gso but enable_gso is not configured,
+   *   gso is still enabled.
+   *
+   * When gso is enabled via the feature mask, the mask must enable both
+   * the host and guest gso feature bits; one-sided or partial GSO is not
+   * supported.
+ */
+ if ((vui->enable_gso == 0) &&
+ ((feature_mask & FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS) ==
+ (FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS)))
+ vui->enable_gso = 1;
+ vhost_user_update_gso_interface_count (vui, 1 /* add */ );
mhash_set_mem (&vum->if_index_by_sock_name, vui->sock_filename,
&vui->if_index, 0);
u8 is_server,
u32 * sw_if_index,
u64 feature_mask,
- u8 renumber, u32 custom_dev_instance, u8 * hwaddr)
+ u8 renumber, u32 custom_dev_instance, u8 * hwaddr,
+ u8 enable_gso, u8 enable_packed)
{
vhost_user_intf_t *vui = NULL;
u32 sw_if_idx = ~0;
vlib_worker_thread_barrier_release (vm);
vhost_user_vui_init (vnm, vui, server_sock_fd, sock_filename,
- feature_mask, &sw_if_idx);
+ feature_mask, &sw_if_idx, enable_gso, enable_packed);
vnet_sw_interface_set_mtu (vnm, vui->sw_if_index, 9000);
vhost_user_rx_thread_placement (vui, 1);
const char *sock_filename,
u8 is_server,
u32 sw_if_index,
- u64 feature_mask, u8 renumber, u32 custom_dev_instance)
+ u64 feature_mask, u8 renumber, u32 custom_dev_instance,
+ u8 enable_gso, u8 enable_packed)
{
vhost_user_main_t *vum = &vhost_user_main;
vhost_user_intf_t *vui = NULL;
vnet_hw_interface_t *hwif;
uword *if_index;
- if (!(hwif = vnet_get_sup_hw_interface (vnm, sw_if_index)) ||
- hwif->dev_class_index != vhost_user_device_class.index)
+ if (!
+ (hwif =
+ vnet_get_sup_hw_interface_api_visible_or_null (vnm, sw_if_index))
+ || hwif->dev_class_index != vhost_user_device_class.index)
return VNET_API_ERROR_INVALID_SW_IF_INDEX;
if (sock_filename == NULL || !(strlen (sock_filename) > 0))
vhost_user_term_if (vui);
vhost_user_vui_init (vnm, vui, server_sock_fd,
- sock_filename, feature_mask, &sw_if_idx);
+ sock_filename, feature_mask, &sw_if_idx, enable_gso,
+ enable_packed);
if (renumber)
vnet_interface_name_renumber (sw_if_idx, custom_dev_instance);
u8 hwaddr[6];
u8 *hw = NULL;
clib_error_t *error = NULL;
+ u8 enable_gso = 0, enable_packed = 0;
/* Get a line of input. */
if (!unformat_user (input, unformat_line_input, line_input))
return 0;
+  /* GSO feature is disabled by default */
+ feature_mask &= ~FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS;
+  /* packed-ring feature is disabled by default */
+ feature_mask &= ~VIRTIO_FEATURE (VIRTIO_F_RING_PACKED);
while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (line_input, "socket %s", &sock_filename))
;
else if (unformat (line_input, "server"))
is_server = 1;
+ else if (unformat (line_input, "gso"))
+ enable_gso = 1;
+ else if (unformat (line_input, "packed"))
+ enable_packed = 1;
else if (unformat (line_input, "feature-mask 0x%llx", &feature_mask))
;
else
int rv;
if ((rv = vhost_user_create_if (vnm, vm, (char *) sock_filename,
is_server, &sw_if_index, feature_mask,
- renumber, custom_dev_instance, hw)))
+ renumber, custom_dev_instance, hw,
+ enable_gso, enable_packed)))
{
error = clib_error_return (0, "vhost_user_create_if returned %d", rv);
goto done;
&sw_if_index))
{
vnet_hw_interface_t *hwif =
- vnet_get_sup_hw_interface (vnm, sw_if_index);
+ vnet_get_sup_hw_interface_api_visible_or_null (vnm, sw_if_index);
if (hwif == NULL ||
vhost_user_device_class.index != hwif->dev_class_index)
{
vhost_user_intf_details_t *vuid = NULL;
u32 *hw_if_indices = 0;
vnet_hw_interface_t *hi;
- u8 *s = NULL;
int i;
if (!out_vuids)
vuid->num_regions = vui->nregions;
vuid->is_server = vui->unix_server_index != ~0;
vuid->sock_errno = vui->sock_errno;
- strncpy ((char *) vuid->sock_filename, (char *) vui->sock_filename,
- sizeof (vuid->sock_filename));
- vuid->sock_filename[ARRAY_LEN (vuid->sock_filename) - 1] = '\0';
- s = format (s, "%v%c", hi->name, 0);
-
- strncpy ((char *) vuid->if_name, (char *) s,
- ARRAY_LEN (vuid->if_name) - 1);
- _vec_len (s) = 0;
+ snprintf ((char *) vuid->sock_filename, sizeof (vuid->sock_filename),
+ "%s", vui->sock_filename);
+ memcpy_s (vuid->if_name, sizeof (vuid->if_name), hi->name,
+ clib_min (vec_len (hi->name), sizeof (vuid->if_name) - 1));
+ vuid->if_name[sizeof (vuid->if_name) - 1] = 0;
}
- vec_free (s);
vec_free (hw_if_indices);
*out_vuids = r_vuids;
return rv;
}
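+/*
+ * Format one split-ring descriptor row for the "show vhost-user" CLI:
+ * slot, guest address, length, flags, next index, and the address at
+ * which the descriptor buffer is mapped in this process.
+ */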
+static u8 *
+format_vhost_user_desc (u8 * s, va_list * args)
+{
+ char *fmt = va_arg (*args, char *);
+ vhost_user_intf_t *vui = va_arg (*args, vhost_user_intf_t *);
+ vring_desc_t *desc_table = va_arg (*args, vring_desc_t *);
+ int idx = va_arg (*args, int);
+ u32 *mem_hint = va_arg (*args, u32 *);
+
+ s = format (s, fmt, idx, desc_table[idx].addr, desc_table[idx].len,
+ desc_table[idx].flags, desc_table[idx].next,
+ pointer_to_uword (map_guest_mem (vui, desc_table[idx].addr,
+ mem_hint)));
+ return s;
+}
+
+static u8 *
+format_vhost_user_vring (u8 * s, va_list * args)
+{
+ char *fmt = va_arg (*args, char *);
+ vhost_user_intf_t *vui = va_arg (*args, vhost_user_intf_t *);
+ int q = va_arg (*args, int);
+
+ s = format (s, fmt, vui->vrings[q].avail->flags, vui->vrings[q].avail->idx,
+ vui->vrings[q].used->flags, vui->vrings[q].used->idx);
+ return s;
+}
+
+static void
+vhost_user_show_fds (vlib_main_t * vm, vhost_user_intf_t * vui, int q)
+{
+ int kickfd = UNIX_GET_FD (vui->vrings[q].kickfd_idx);
+ int callfd = UNIX_GET_FD (vui->vrings[q].callfd_idx);
+
+ vlib_cli_output (vm, " kickfd %d callfd %d errfd %d\n", kickfd, callfd,
+ vui->vrings[q].errfd);
+}
+
+static void
+vhost_user_show_desc (vlib_main_t * vm, vhost_user_intf_t * vui, int q,
+ int show_descr, int show_verbose)
+{
+ int j;
+ u32 mem_hint = 0;
+ u32 idx;
+ u32 n_entries;
+ vring_desc_t *desc_table;
+
+ if (vui->vrings[q].avail && vui->vrings[q].used)
+ vlib_cli_output (vm, "%U", format_vhost_user_vring,
+ " avail.flags %x avail.idx %d used.flags %x used.idx %d\n",
+ vui, q);
+
+ vhost_user_show_fds (vm, vui, q);
+
+ if (show_descr)
+ {
+ vlib_cli_output (vm, "\n descriptor table:\n");
+ vlib_cli_output (vm,
+ " slot addr len flags next "
+ "user_addr\n");
+ vlib_cli_output (vm,
+ " ===== ================== ===== ====== ===== "
+ "==================\n");
+ for (j = 0; j < vui->vrings[q].qsz_mask + 1; j++)
+ {
+ desc_table = vui->vrings[q].desc;
+ vlib_cli_output (vm, "%U", format_vhost_user_desc,
+ " %-5d 0x%016lx %-5d 0x%04x %-5d 0x%016lx\n", vui,
+ desc_table, j, &mem_hint);
+ if (show_verbose && (desc_table[j].flags & VRING_DESC_F_INDIRECT))
+ {
+ n_entries = desc_table[j].len / sizeof (vring_desc_t);
+ desc_table = map_guest_mem (vui, desc_table[j].addr, &mem_hint);
+ if (desc_table)
+ {
+ for (idx = 0; idx < clib_min (20, n_entries); idx++)
+ {
+ vlib_cli_output
+ (vm, "%U", format_vhost_user_desc,
+ "> %-4u 0x%016lx %-5u 0x%04x %-5u 0x%016lx\n", vui,
+ desc_table, idx, &mem_hint);
+ }
+ if (n_entries >= 20)
+ vlib_cli_output (vm, "Skip displaying entries 20...%u\n",
+ n_entries);
+ }
+ }
+ }
+ }
+}
+
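+/*
+ * Packed-ring variant of format_vhost_user_desc: prints the descriptor id
+ * instead of a next index.
+ */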
+static u8 *
+format_vhost_user_packed_desc (u8 * s, va_list * args)
+{
+ char *fmt = va_arg (*args, char *);
+ vhost_user_intf_t *vui = va_arg (*args, vhost_user_intf_t *);
+ vring_packed_desc_t *desc_table = va_arg (*args, vring_packed_desc_t *);
+ int idx = va_arg (*args, int);
+ u32 *mem_hint = va_arg (*args, u32 *);
+
+ s = format (s, fmt, idx, desc_table[idx].addr, desc_table[idx].len,
+ desc_table[idx].flags, desc_table[idx].id,
+ pointer_to_uword (map_guest_mem (vui, desc_table[idx].addr,
+ mem_hint)));
+ return s;
+}
+
+static u8 *
+format_vhost_user_vring_packed (u8 * s, va_list * args)
+{
+ char *fmt = va_arg (*args, char *);
+ vhost_user_intf_t *vui = va_arg (*args, vhost_user_intf_t *);
+ int q = va_arg (*args, int);
+
+ s = format (s, fmt, vui->vrings[q].avail_event->flags,
+ vui->vrings[q].avail_event->off_wrap,
+ vui->vrings[q].used_event->flags,
+ vui->vrings[q].used_event->off_wrap,
+ vui->vrings[q].avail_wrap_counter,
+ vui->vrings[q].used_wrap_counter);
+ return s;
+}
+
+static void
+vhost_user_show_desc_packed (vlib_main_t * vm, vhost_user_intf_t * vui, int q,
+ int show_descr, int show_verbose)
+{
+ int j;
+ u32 mem_hint = 0;
+ u32 idx;
+ u32 n_entries;
+ vring_packed_desc_t *desc_table;
+
+ if (vui->vrings[q].avail_event && vui->vrings[q].used_event)
+ vlib_cli_output (vm, "%U", format_vhost_user_vring_packed,
+ " avail_event.flags %x avail_event.off_wrap %u "
+ "used_event.flags %x used_event.off_wrap %u\n"
+ " avail wrap counter %u, used wrap counter %u\n",
+ vui, q);
+
+ vhost_user_show_fds (vm, vui, q);
+
+ if (show_descr)
+ {
+ vlib_cli_output (vm, "\n descriptor table:\n");
+ vlib_cli_output (vm,
+ " slot addr len flags id "
+ "user_addr\n");
+ vlib_cli_output (vm,
+ " ===== ================== ===== ====== ===== "
+ "==================\n");
+ for (j = 0; j < vui->vrings[q].qsz_mask + 1; j++)
+ {
+ desc_table = vui->vrings[q].packed_desc;
+ vlib_cli_output (vm, "%U", format_vhost_user_packed_desc,
+ " %-5u 0x%016lx %-5u 0x%04x %-5u 0x%016lx\n", vui,
+ desc_table, j, &mem_hint);
+ if (show_verbose && (desc_table[j].flags & VRING_DESC_F_INDIRECT))
+ {
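+	      /* Indirect table length is in bytes; each packed descriptor
+	         is 16 bytes */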
+ n_entries = desc_table[j].len >> 4;
+ desc_table = map_guest_mem (vui, desc_table[j].addr, &mem_hint);
+ if (desc_table)
+ {
+ for (idx = 0; idx < clib_min (20, n_entries); idx++)
+ {
+ vlib_cli_output
+ (vm, "%U", format_vhost_user_packed_desc,
+ "> %-4u 0x%016lx %-5u 0x%04x %-5u 0x%016lx\n", vui,
+ desc_table, idx, &mem_hint);
+ }
+ if (n_entries >= 20)
+ vlib_cli_output (vm, "Skip displaying entries 20...%u\n",
+ n_entries);
+ }
+ }
+ }
+ }
+}
+
clib_error_t *
show_vhost_user_command_fn (vlib_main_t * vm,
unformat_input_t * input,
u32 ci;
int i, j, q;
int show_descr = 0;
+ int show_verbose = 0;
struct feat_struct
{
u8 bit;
static struct feat_struct feat_array[] = {
#define _(s,b) { .str = #s, .bit = b, },
- foreach_virtio_net_feature
+ foreach_virtio_net_features
#undef _
{.str = NULL}
};
}
else if (unformat (input, "descriptors") || unformat (input, "desc"))
show_descr = 1;
+ else if (unformat (input, "verbose"))
+ show_verbose = 1;
else
{
error = clib_error_return (0, "unknown input `%U'",
vlib_cli_output (vm, "Virtio vhost-user interfaces");
vlib_cli_output (vm, "Global:\n coalesce frames %d time %e",
vum->coalesce_frames, vum->coalesce_time);
- vlib_cli_output (vm, " number of rx virtqueues in interrupt mode: %d",
+ vlib_cli_output (vm, " Number of rx virtqueues in interrupt mode: %d",
vum->ifq_count);
+ vlib_cli_output (vm, " Number of GSO interfaces: %d", vum->gso_count);
for (i = 0; i < vec_len (hw_if_indices); i++)
{
vlib_cli_output (vm, "Interface: %U (ifindex %d)",
format_vnet_hw_if_index_name, vnm, hw_if_indices[i],
hw_if_indices[i]);
+      if (vui->enable_gso)
+	vlib_cli_output (vm, "  GSO enabled");
+      if (vui->enable_packed)
+	vlib_cli_output (vm, "  Packed ring enabled");
vlib_cli_output (vm, "virtio_net_hdr_sz %d\n"
" features mask (0x%llx): \n"
vui->vrings[q].last_avail_idx,
vui->vrings[q].last_used_idx);
- if (vui->vrings[q].avail && vui->vrings[q].used)
- vlib_cli_output (vm,
- " avail.flags %x avail.idx %d used.flags %x used.idx %d\n",
- vui->vrings[q].avail->flags,
- vui->vrings[q].avail->idx,
- vui->vrings[q].used->flags,
- vui->vrings[q].used->idx);
-
- int kickfd = UNIX_GET_FD (vui->vrings[q].kickfd_idx);
- int callfd = UNIX_GET_FD (vui->vrings[q].callfd_idx);
- vlib_cli_output (vm, " kickfd %d callfd %d errfd %d\n",
- kickfd, callfd, vui->vrings[q].errfd);
-
- if (show_descr)
- {
- vlib_cli_output (vm, "\n descriptor table:\n");
- vlib_cli_output (vm,
- " id addr len flags next user_addr\n");
- vlib_cli_output (vm,
- " ===== ================== ===== ====== ===== ==================\n");
- for (j = 0; j < vui->vrings[q].qsz_mask + 1; j++)
- {
- u32 mem_hint = 0;
- vlib_cli_output (vm,
- " %-5d 0x%016lx %-5d 0x%04x %-5d 0x%016lx\n",
- j, vui->vrings[q].desc[j].addr,
- vui->vrings[q].desc[j].len,
- vui->vrings[q].desc[j].flags,
- vui->vrings[q].desc[j].next,
- pointer_to_uword (map_guest_mem
- (vui,
- vui->vrings[q].desc[j].
- addr, &mem_hint)));
- }
- }
+ if (vhost_user_is_packed_ring_supported (vui))
+ vhost_user_show_desc_packed (vm, vui, q, show_descr,
+ show_verbose);
+ else
+ vhost_user_show_desc (vm, vui, q, show_descr, show_verbose);
}
vlib_cli_output (vm, "\n");
}
VLIB_CLI_COMMAND (vhost_user_connect_command, static) = {
.path = "create vhost-user",
.short_help = "create vhost-user socket <socket-filename> [server] "
- "[feature-mask <hex>] [hwaddr <mac-addr>] [renumber <dev_instance>] ",
+ "[feature-mask <hex>] [hwaddr <mac-addr>] [renumber <dev_instance>] [gso] "
+ "[packed]",
.function = vhost_user_connect_command_fn,
.is_mp_safe = 1,
};
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_vhost_user_command, static) = {
.path = "show vhost-user",
- .short_help = "show vhost-user [<interface> [<interface> [..]]] [descriptors]",
+ .short_help = "show vhost-user [<interface> [<interface> [..]]] "
+ "[[descriptors] [verbose]]",
.function = show_vhost_user_command_fn,
};
/* *INDENT-ON* */