#ifndef __VIRTIO_VHOST_USER_INLINE_H__
#define __VIRTIO_VHOST_USER_INLINE_H__
/* vhost-user inline functions */
+#include <vppinfra/elog.h>
static_always_inline void *
map_guest_mem (vhost_user_intf_t * vui, uword addr, u32 * hint)
@@ ... @@ map_guest_mem
      }
    }
#endif
- DBG_VQ ("failed to map guest mem addr %llx", addr);
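+ /* Mapping failed: record it via the event logger, which is cheap
+  * enough to leave enabled on the data path. */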
+ /* *INDENT-OFF* */
+ ELOG_TYPE_DECLARE (el) =
+ {
+ .format = "failed to map guest mem addr %lx",
+ .format_args = "i8",
+ };
+ /* *INDENT-ON* */
+ struct
+ {
+ uword addr;
+ } *ed;
+ ed = ELOG_DATA (&vlib_global_main.elog_main, el);
+ ed->addr = addr;
*hint = 0;
return 0;
}
@@ ... @@ vhost_user_log_dirty_pages_2
static_always_inline void
vhost_user_log_dirty_pages_2 (vhost_user_intf_t * vui,
			      u64 addr, u64 len, u8 is_host_address)
{
if (PREDICT_TRUE (vui->log_base_addr == 0
- || !(vui->features & (1 << FEAT_VHOST_F_LOG_ALL))))
+ || !(vui->features & VIRTIO_FEATURE (VHOST_F_LOG_ALL))))
{
return;
}
  if (is_host_address)
    {
      addr = pointer_to_uword (map_user_mem (vui, (uword) addr));
    }
if (PREDICT_FALSE ((addr + len - 1) / VHOST_LOG_PAGE / 8 >= vui->log_size))
{
- DBG_SOCK ("vhost_user_log_dirty_pages(): out of range\n");
+ vu_log_debug (vui, "vhost_user_log_dirty_pages(): out of range\n");
return;
}
@@ ... @@ format_vhost_trace
  CLIB_UNUSED (vnet_main_t * vnm) = vnet_get_main ();
vhost_user_main_t *vum = &vhost_user_main;
vhost_trace_t *t = va_arg (*va, vhost_trace_t *);
- vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
- t->device_index);
-
- vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, vui->sw_if_index);
-
- u32 indent = format_get_indent (s);
+ vhost_user_intf_t *vui = vum->vhost_user_interfaces + t->device_index;
+ vnet_sw_interface_t *sw;
+ u32 indent;
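+ /* The trace can be formatted after the interface has been deleted;
+  * validate the pool slot instead of dereferencing a freed element. */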
+ if (pool_is_free (vum->vhost_user_interfaces, vui))
+ {
+ s = format (s, "vhost-user interface is deleted");
+ return s;
+ }
+ sw = vnet_get_sw_interface (vnm, vui->sw_if_index);
+ indent = format_get_indent (s);
s = format (s, "%U %U queue %d\n", format_white_space, indent,
format_vnet_sw_interface_name, vnm, sw, t->qid);
return s;
}
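+
+/* VIRTIO_FEATURE (x) is the 1ULL << x feature bit, so these helpers return
+ * the raw bit (nonzero if negotiated) rather than a normalized boolean. */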
+static_always_inline u64
+vhost_user_is_packed_ring_supported (vhost_user_intf_t * vui)
+{
+ return (vui->features & VIRTIO_FEATURE (VIRTIO_F_RING_PACKED));
+}
+
+static_always_inline u64
+vhost_user_is_event_idx_supported (vhost_user_intf_t * vui)
+{
+ return (vui->features & VIRTIO_FEATURE (VIRTIO_RING_F_EVENT_IDX));
+}
+
static_always_inline void
-vhost_user_send_call (vlib_main_t * vm, vhost_user_vring_t * vq)
+vhost_user_kick (vlib_main_t * vm, vhost_user_vring_t * vq)
{
vhost_user_main_t *vum = &vhost_user_main;
u64 x = 1;
  int fd = UNIX_GET_FD (vq->callfd_idx);
  int rv;

  rv = write (fd, &x, sizeof (x));
- if (rv <= 0)
+ if (PREDICT_FALSE (rv <= 0))
    {
      clib_unix_warning
	("Error: Could not write to unix socket for callfd %d", fd);
      return;
    }

  vq->n_since_last_int = 0;
  vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
}
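+
+/* With VIRTIO_RING_F_EVENT_IDX, the split ring carries two extra u16s per
+ * the virtio spec: the device publishes avail_event just past the used ring
+ * entries, and the driver publishes used_event just past the avail ring. */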
+static_always_inline u16
+vhost_user_avail_event_idx (vhost_user_vring_t * vq)
+{
+ volatile u16 *event_idx = (u16 *) & (vq->used->ring[vq->qsz_mask + 1]);
+
+ return *event_idx;
+}
+
+static_always_inline u16
+vhost_user_used_event_idx (vhost_user_vring_t * vq)
+{
+ volatile u16 *event_idx = (u16 *) & (vq->avail->ring[vq->qsz_mask + 1]);
+
+ return *event_idx;
+}
+
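+/* The virtio spec's vring_need_event(): true when new_idx has moved past
+ * event_idx since old_idx, with u16 arithmetic making wraparound safe. */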
+static_always_inline u16
+vhost_user_need_event (u16 event_idx, u16 new_idx, u16 old_idx)
+{
+ return ((u16) (new_idx - event_idx - 1) < (u16) (new_idx - old_idx));
+}
+
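+/* Split-ring call suppression: kick only when the driver's used_event
+ * index asks for it; the very first kick is always delivered. */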
+static_always_inline void
+vhost_user_send_call_event_idx (vlib_main_t * vm, vhost_user_vring_t * vq)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ u8 first_kick = vq->first_kick;
+ u16 event_idx = vhost_user_used_event_idx (vq);
+
+ vq->first_kick = 1;
+ if (vhost_user_need_event (event_idx, vq->last_used_idx, vq->last_kick) ||
+ PREDICT_FALSE (!first_kick))
+ {
+ vhost_user_kick (vm, vq);
+ vq->last_kick = event_idx;
+ }
+ else
+ {
+ vq->n_since_last_int = 0;
+ vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
+ }
+}
+
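+/* Packed-ring call suppression: the driver event area packs a 15-bit ring
+ * offset and a wrap bit into off_wrap. VRING_EVENT_F_DESC means "notify at
+ * that descriptor"; any other mode kicks unconditionally here. */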
+static_always_inline void
+vhost_user_send_call_event_idx_packed (vlib_main_t * vm,
+ vhost_user_vring_t * vq)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ u8 first_kick = vq->first_kick;
+ u16 off_wrap;
+ u16 event_idx;
+ u16 new_idx = vq->last_used_idx;
+ u16 old_idx = vq->last_kick;
+
+ if (PREDICT_TRUE (vq->avail_event->flags == VRING_EVENT_F_DESC))
+ {
+ CLIB_COMPILER_BARRIER ();
+ off_wrap = vq->avail_event->off_wrap;
+ event_idx = off_wrap & 0x7fff;
+ if (vq->used_wrap_counter != (off_wrap >> 15))
+ event_idx -= (vq->qsz_mask + 1);
+
+ if (new_idx <= old_idx)
+ old_idx -= (vq->qsz_mask + 1);
+
+ vq->first_kick = 1;
+ vq->last_kick = event_idx;
+ if (vhost_user_need_event (event_idx, new_idx, old_idx) ||
+ PREDICT_FALSE (!first_kick))
+ vhost_user_kick (vm, vq);
+ else
+ {
+ vq->n_since_last_int = 0;
+ vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
+ }
+ }
+ else
+ vhost_user_kick (vm, vq);
+}
+
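+/* Single entry point for interrupting the driver: dispatch on the
+ * negotiated features (event idx, packed vs. split ring). */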
+static_always_inline void
+vhost_user_send_call (vlib_main_t * vm, vhost_user_intf_t * vui,
+ vhost_user_vring_t * vq)
+{
+ if (vhost_user_is_event_idx_supported (vui))
+ {
+ if (vhost_user_is_packed_ring_supported (vui))
+ vhost_user_send_call_event_idx_packed (vm, vq);
+ else
+ vhost_user_send_call_event_idx (vm, vq);
+ }
+ else
+ vhost_user_kick (vm, vq);
+}
+
static_always_inline u8
vui_is_link_up (vhost_user_intf_t * vui)
{
return vui->admin_up && vui->is_ready;
}
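+
+/* Keep a global count of GSO-enabled interfaces so the data path knows
+ * whether any GSO work can show up at all. */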
+static_always_inline void
+vhost_user_update_gso_interface_count (vhost_user_intf_t * vui, u8 add)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+
+ if (vui->enable_gso)
+ {
+ if (add)
+ {
+ vum->gso_count++;
+ }
+ else
+ {
+ ASSERT (vum->gso_count > 0);
+ vum->gso_count--;
+ }
+ }
+}
+
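+/* A packed descriptor is available when its VRING_DESC_F_AVAIL bit matches
+ * the ring's current wrap counter. */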
+static_always_inline u8
+vhost_user_packed_desc_available (vhost_user_vring_t * vring, u16 idx)
+{
+ return (((vring->packed_desc[idx].flags & VRING_DESC_F_AVAIL) ==
+ vring->avail_wrap_counter));
+}
+
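+/* avail_wrap_counter is stored as the VRING_DESC_F_AVAIL bit value itself,
+ * so the toggle XORs the flag bit and the comparison above needs no shift. */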
+static_always_inline void
+vhost_user_advance_last_avail_idx (vhost_user_vring_t * vring)
+{
+ vring->last_avail_idx++;
+ if (PREDICT_FALSE ((vring->last_avail_idx & vring->qsz_mask) == 0))
+ {
+ vring->avail_wrap_counter ^= VRING_DESC_F_AVAIL;
+ vring->last_avail_idx = 0;
+ }
+}
+
+static_always_inline void
+vhost_user_advance_last_avail_table_idx (vhost_user_intf_t * vui,
+ vhost_user_vring_t * vring,
+ u8 chained)
+{
+ if (chained)
+ {
+ vring_packed_desc_t *desc_table = vring->packed_desc;
+
+ /* walk past every descriptor that has VRING_DESC_F_NEXT set; the final
+  * advance below then steps over the tail of the chain */
+ while (desc_table[vring->last_avail_idx & vring->qsz_mask].flags &
+ VRING_DESC_F_NEXT)
+ vhost_user_advance_last_avail_idx (vring);
+ }
+
+ vhost_user_advance_last_avail_idx (vring);
+}
+
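+/* Step last_avail_idx back one slot, undoing the wrap-counter toggle when
+ * crossing the ring boundary in reverse. */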
+static_always_inline void
+vhost_user_undo_advanced_last_avail_idx (vhost_user_vring_t * vring)
+{
+ if (PREDICT_FALSE ((vring->last_avail_idx & vring->qsz_mask) == 0))
+ vring->avail_wrap_counter ^= VRING_DESC_F_AVAIL;
+
+ if (PREDICT_FALSE (vring->last_avail_idx == 0))
+ vring->last_avail_idx = vring->qsz_mask;
+ else
+ vring->last_avail_idx--;
+}
+
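+/* Error path for a multi-buffer packet: return every descriptor consumed
+ * beyond the first buffer and fix up the processed count. */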
+static_always_inline void
+vhost_user_dequeue_descs (vhost_user_vring_t * rxvq,
+ virtio_net_hdr_mrg_rxbuf_t * hdr,
+ u16 * n_descs_processed)
+{
+ u16 i;
+
+ *n_descs_processed -= (hdr->num_buffers - 1);
+ for (i = 0; i < hdr->num_buffers - 1; i++)
+ vhost_user_undo_advanced_last_avail_idx (rxvq);
+}
+
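+/* Error path for a chained descriptor: walk the whole reservation back. */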
+static_always_inline void
+vhost_user_dequeue_chained_descs (vhost_user_vring_t * rxvq,
+ u16 * n_descs_processed)
+{
+ while (*n_descs_processed)
+ {
+ vhost_user_undo_advanced_last_avail_idx (rxvq);
+ (*n_descs_processed)--;
+ }
+}
+
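+/* Note the asymmetry: used_wrap_counter is a plain 0/1, while
+ * avail_wrap_counter above holds the VRING_DESC_F_AVAIL bit. */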
+static_always_inline void
+vhost_user_advance_last_used_idx (vhost_user_vring_t * vring)
+{
+ vring->last_used_idx++;
+ if (PREDICT_FALSE ((vring->last_used_idx & vring->qsz_mask) == 0))
+ {
+ vring->used_wrap_counter ^= 1;
+ vring->last_used_idx = 0;
+ }
+}
+
#endif
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */