{
// FIXME: check if the new dev instance is already used
vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
+ hi->dev_instance);
+
vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
hi->dev_instance, ~0);
vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
new_dev_instance;
- DBG_SOCK ("renumbered vhost-user interface dev_instance %d to %d",
- hi->dev_instance, new_dev_instance);
+ vu_log_debug (vui, "renumbered vhost-user interface dev_instance %d to %d",
+ hi->dev_instance, new_dev_instance);
return 0;
}
/**
 * Try to acquire the per-vring spinlock without blocking.
 *
 * @param vui  vhost-user interface owning the vring locks
 * @param qid  vring (queue) index into vui->vring_locks
 * @return 0 if the lock was acquired, non-zero if it was already held.
 */
static_always_inline int
vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
{
  /* clib_atomic_test_and_set is the portable wrapper around the
   * __sync/__atomic test-and-set builtin (acquire semantics). */
  return clib_atomic_test_and_set (vui->vring_locks[qid]);
}
/**
/**
 * Release the per-vring spinlock taken by vhost_user_vring_lock /
 * vhost_user_vring_try_lock.
 *
 * @param vui  vhost-user interface owning the vring locks
 * @param qid  vring (queue) index into vui->vring_locks
 */
static_always_inline void
vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
{
  /* Release store (pairs with the acquire in test-and-set) rather than a
   * plain *lock = 0, so the unlock cannot be reordered before protected
   * accesses. */
  clib_atomic_release (vui->vring_locks[qid]);
}
static_always_inline void
vring_desc_t *hdr_desc = 0;
u32 hint = 0;
- memset (t, 0, sizeof (*t));
+ clib_memset (t, 0, sizeof (*t));
t->device_index = vui - vum->vhost_user_interfaces;
t->qid = qid;
CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD);
CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD);
- clib_memcpy (dst0, (void *) cpy[0].src, cpy[0].len);
- clib_memcpy (dst1, (void *) cpy[1].src, cpy[1].len);
+ clib_memcpy_fast (dst0, (void *) cpy[0].src, cpy[0].len);
+ clib_memcpy_fast (dst1, (void *) cpy[1].src, cpy[1].len);
vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
{
if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
return 1;
- clib_memcpy (dst0, (void *) cpy->src, cpy->len);
+ clib_memcpy_fast (dst0, (void *) cpy->src, cpy->len);
vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
copy_len -= 1;
cpy += 1;
vlib_node_runtime_t *
node, vlib_frame_t * frame)
{
- u32 *buffers = vlib_frame_args (frame);
+ u32 *buffers = vlib_frame_vector_args (frame);
u32 n_left = frame->n_vectors;
vhost_user_main_t *vum = &vhost_user_main;
vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
goto done3;
}
- if (PREDICT_FALSE (!vui->is_up))
+ if (PREDICT_FALSE (!vui->is_ready))
{
error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
goto done3;
VHOST_VRING_IDX_RX (*vec_elt_at_index
(vui->per_cpu_tx_qid, thread_index));
rxvq = &vui->vrings[qid];
+ if (PREDICT_FALSE (rxvq->avail == 0))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
+ goto done3;
+ }
+
if (PREDICT_FALSE (vui->use_tx_spinlock))
vhost_user_vring_lock (vui, qid);
thread_index, vui->sw_if_index, n_left);
}
- vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
+ vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
return frame->n_vectors;
}
txvq->used->flags = 0;
else
{
- clib_warning ("BUG: unhandled mode %d changed for if %d queue %d", mode,
- hw_if_index, qid);
+ vu_log_err (vui, "unhandled mode %d changed for if %d queue %d", mode,
+ hw_if_index, qid);
return clib_error_return (0, "unsupported");
}
vhost_user_main_t *vum = &vhost_user_main;
vhost_user_intf_t *vui =
pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
- u32 hw_flags = 0;
+ u8 link_old, link_new;
+
+ link_old = vui_is_link_up (vui);
+
vui->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
- hw_flags = vui->admin_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0;
- vnet_hw_interface_set_flags (vnm, vui->hw_if_index, hw_flags);
+ link_new = vui_is_link_up (vui);
+
+ if (link_old != link_new)
+ vnet_hw_interface_set_flags (vnm, vui->hw_if_index, link_new ?
+ VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
return /* no error */ 0;
}
-#ifndef CLIB_MARCH_VARIANT
/* *INDENT-OFF* */
VNET_DEVICE_CLASS (vhost_user_device_class) = {
.name = "vhost-user",
.format_tx_trace = format_vhost_trace,
};
-#endif
-
/* *INDENT-ON* */
/*