*/
-#define VHOST_USER_DEBUG_SOCKET 0
#define VHOST_DEBUG_VQ 0
-#if VHOST_USER_DEBUG_SOCKET == 1
-#define DBG_SOCK(args...) clib_warning(args);
-#else
-#define DBG_SOCK(args...)
-#endif
+/* Debug logging for the vhost-user socket protocol, gated at run time by
+ * vhost_user_main.debug (toggled with the "debug vhost-user on|off" CLI,
+ * replacing the old compile-time VHOST_USER_DEBUG_SOCKET switch).
+ * Wrapped in do { } while (0) so the macro behaves as a single statement:
+ * the previous "{ ... };" form injects a stray semicolon and breaks when
+ * used as the body of an unbraced if/else. */
+#define DBG_SOCK(args...)                         \
+  do                                              \
+    {                                             \
+      vhost_user_main_t *_vum = &vhost_user_main; \
+      if (_vum->debug)                            \
+	clib_warning (args);                      \
+    }                                             \
+  while (0)
#if VHOST_DEBUG_VQ == 1
#define DBG_VQ(args...) clib_warning(args);
u8 buff[8];
n = read (uf->file_descriptor, ((char *) &buff), 8);
- DBG_SOCK ("if %d CALL queue %d", uf->private_data >> 8,
- uf->private_data & 0xff);
return 0;
}
(msg.state.num == 0) || /* it cannot be zero */
((msg.state.num - 1) & msg.state.num)) /* must be power of 2 */
goto close_socket;
- vui->vrings[msg.state.index].qsz = msg.state.num;
+ vui->vrings[msg.state.index].qsz_mask = msg.state.num - 1;
break;
case VHOST_USER_SET_VRING_ADDR:
break;
case VHOST_USER_SET_VRING_CALL:
- DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_CALL u64 %d",
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_CALL %d",
vui->hw_if_index, msg.u64);
q = (u8) (msg.u64 & 0xFF);
break;
case VHOST_USER_SET_VRING_KICK:
- DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_KICK u64 %d",
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_KICK %d",
vui->hw_if_index, msg.u64);
q = (u8) (msg.u64 & 0xFF);
break;
case VHOST_USER_SET_VRING_ERR:
- DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_ERR u64 %d",
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_ERR %d",
vui->hw_if_index, msg.u64);
q = (u8) (msg.u64 & 0xFF);
break;
case VHOST_USER_GET_VRING_BASE:
- DBG_SOCK ("if %d msg VHOST_USER_GET_VRING_BASE idx %d num %d",
- vui->hw_if_index, msg.state.index, msg.state.num);
-
if (msg.state.index >= VHOST_VRING_MAX_N)
{
DBG_SOCK ("invalid vring index VHOST_USER_GET_VRING_BASE:"
/* Spec says: Client must [...] stop ring upon receiving VHOST_USER_GET_VRING_BASE. */
vhost_user_vring_close (vui, msg.state.index);
+ DBG_SOCK ("if %d msg VHOST_USER_GET_VRING_BASE idx %d num %d",
+ vui->hw_if_index, msg.state.index, msg.state.num);
break;
case VHOST_USER_NONE:
break;
case VHOST_USER_GET_PROTOCOL_FEATURES:
- DBG_SOCK ("if %d msg VHOST_USER_GET_PROTOCOL_FEATURES",
- vui->hw_if_index);
-
msg.flags |= 4;
msg.u64 = (1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |
(1 << VHOST_USER_PROTOCOL_F_MQ);
msg.size = sizeof (msg.u64);
+ DBG_SOCK
+ ("if %d msg VHOST_USER_GET_PROTOCOL_FEATURES - reply 0x%016llx",
+ vui->hw_if_index, msg.u64);
break;
case VHOST_USER_SET_PROTOCOL_FEATURES:
- DBG_SOCK ("if %d msg VHOST_USER_SET_PROTOCOL_FEATURES features 0x%lx",
- vui->hw_if_index, msg.u64);
+ DBG_SOCK
+ ("if %d msg VHOST_USER_SET_PROTOCOL_FEATURES features 0x%016llx",
+ vui->hw_if_index, msg.u64);
vui->protocol_features = msg.u64;
break;
case VHOST_USER_GET_QUEUE_NUM:
- DBG_SOCK ("if %d msg VHOST_USER_GET_QUEUE_NUM", vui->hw_if_index);
msg.flags |= 4;
msg.u64 = VHOST_VRING_MAX_N;
msg.size = sizeof (msg.u64);
+ DBG_SOCK ("if %d msg VHOST_USER_GET_QUEUE_NUM - reply %d",
+ vui->hw_if_index, msg.u64);
break;
case VHOST_USER_SET_VRING_ENABLE:
vlib_buffer_t * b, vhost_user_vring_t * txvq)
{
vhost_user_main_t *vum = &vhost_user_main;
- u32 qsz_mask = txvq->qsz - 1;
u32 last_avail_idx = txvq->last_avail_idx;
- u32 desc_current = txvq->avail->ring[last_avail_idx & qsz_mask];
+ u32 desc_current = txvq->avail->ring[last_avail_idx & txvq->qsz_mask];
vring_desc_t *hdr_desc = 0;
virtio_net_hdr_mrg_rxbuf_t *hdr;
u32 hint = 0;
*/
u32 discarded_packets = 0;
u32 avail_idx = txvq->avail->idx;
- u16 qsz_mask = txvq->qsz - 1;
while (discarded_packets != discard_max)
{
if (avail_idx == txvq->last_avail_idx)
goto out;
u16 desc_chain_head =
- txvq->avail->ring[txvq->last_avail_idx & qsz_mask];
+ txvq->avail->ring[txvq->last_avail_idx & txvq->qsz_mask];
txvq->last_avail_idx++;
- txvq->used->ring[txvq->last_used_idx & qsz_mask].id = desc_chain_head;
- txvq->used->ring[txvq->last_used_idx & qsz_mask].len = 0;
+ txvq->used->ring[txvq->last_used_idx & txvq->qsz_mask].id =
+ desc_chain_head;
+ txvq->used->ring[txvq->last_used_idx & txvq->qsz_mask].len = 0;
vhost_user_log_dirty_ring (vui, txvq,
- ring[txvq->last_used_idx & qsz_mask]);
+ ring[txvq->last_used_idx & txvq->qsz_mask]);
txvq->last_used_idx++;
discarded_packets++;
}
u32 n_left_to_next, *to_next;
u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
u32 n_trace = vlib_get_trace_count (vm, node);
- u16 qsz_mask;
u32 map_hint = 0;
u16 thread_index = vlib_get_thread_index ();
u16 copy_len = 0;
return 0;
}
- if (PREDICT_FALSE (n_left == txvq->qsz))
+ if (PREDICT_FALSE (n_left == (txvq->qsz_mask + 1)))
{
/*
* Informational error logging when VPP is not
VHOST_USER_INPUT_FUNC_ERROR_FULL_RX_QUEUE, 1);
}
- qsz_mask = txvq->qsz - 1;
-
if (n_left > VLIB_FRAME_SIZE)
n_left = VLIB_FRAME_SIZE;
break;
}
- desc_current = txvq->avail->ring[txvq->last_avail_idx & qsz_mask];
+ desc_current =
+ txvq->avail->ring[txvq->last_avail_idx & txvq->qsz_mask];
vum->cpus[thread_index].rx_buffers_len--;
bi_current = (vum->cpus[thread_index].rx_buffers)
[vum->cpus[thread_index].rx_buffers_len];
rx_buffers_len - 1], LOAD);
/* Just preset the used descriptor id and length for later */
- txvq->used->ring[txvq->last_used_idx & qsz_mask].id = desc_current;
- txvq->used->ring[txvq->last_used_idx & qsz_mask].len = 0;
+ txvq->used->ring[txvq->last_used_idx & txvq->qsz_mask].id =
+ desc_current;
+ txvq->used->ring[txvq->last_used_idx & txvq->qsz_mask].len = 0;
vhost_user_log_dirty_ring (vui, txvq,
- ring[txvq->last_used_idx & qsz_mask]);
+ ring[txvq->last_used_idx &
+ txvq->qsz_mask]);
/* The buffer should already be initialized */
b_head->total_length_not_including_first_buffer = 0;
vlib_buffer_t * b, vhost_user_vring_t * rxvq)
{
vhost_user_main_t *vum = &vhost_user_main;
- u32 qsz_mask = rxvq->qsz - 1;
u32 last_avail_idx = rxvq->last_avail_idx;
- u32 desc_current = rxvq->avail->ring[last_avail_idx & qsz_mask];
+ u32 desc_current = rxvq->avail->ring[last_avail_idx & rxvq->qsz_mask];
vring_desc_t *hdr_desc = 0;
u32 hint = 0;
pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
u32 qid = ~0;
vhost_user_vring_t *rxvq;
- u16 qsz_mask;
u8 error;
u32 thread_index = vlib_get_thread_index ();
u32 map_hint = 0;
if (PREDICT_FALSE (vui->use_tx_spinlock))
vhost_user_vring_lock (vui, qid);
- qsz_mask = rxvq->qsz - 1; /* qsz is always power of 2 */
-
retry:
error = VHOST_USER_TX_FUNC_ERROR_NONE;
tx_headers_len = 0;
desc_table = rxvq->desc;
desc_head = desc_index =
- rxvq->avail->ring[rxvq->last_avail_idx & qsz_mask];
+ rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
/* Go deeper in case of indirect descriptor
* I don't know of any driver providing indirect for RX. */
&vum->cpus[thread_index].tx_headers[tx_headers_len - 1];
//Move from available to used buffer
- rxvq->used->ring[rxvq->last_used_idx & qsz_mask].id =
+ rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
desc_head;
- rxvq->used->ring[rxvq->last_used_idx & qsz_mask].len =
+ rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len =
desc_len;
vhost_user_log_dirty_ring (vui, rxvq,
ring[rxvq->last_used_idx &
- qsz_mask]);
+ rxvq->qsz_mask]);
rxvq->last_avail_idx++;
rxvq->last_used_idx++;
desc_table = rxvq->desc;
desc_head = desc_index =
- rxvq->avail->ring[rxvq->last_avail_idx & qsz_mask];
+ rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
if (PREDICT_FALSE
(rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
{
}
//Move from available to used ring
- rxvq->used->ring[rxvq->last_used_idx & qsz_mask].id = desc_head;
- rxvq->used->ring[rxvq->last_used_idx & qsz_mask].len = desc_len;
+ rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id = desc_head;
+ rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len = desc_len;
vhost_user_log_dirty_ring (vui, rxvq,
- ring[rxvq->last_used_idx & qsz_mask]);
+ ring[rxvq->last_used_idx & rxvq->qsz_mask]);
rxvq->last_avail_idx++;
rxvq->last_used_idx++;
vlib_cli_output (vm,
" qsz %d last_avail_idx %d last_used_idx %d\n",
- vui->vrings[q].qsz, vui->vrings[q].last_avail_idx,
+ vui->vrings[q].qsz_mask + 1,
+ vui->vrings[q].last_avail_idx,
vui->vrings[q].last_used_idx);
if (vui->vrings[q].avail && vui->vrings[q].used)
" id addr len flags next user_addr\n");
vlib_cli_output (vm,
" ===== ================== ===== ====== ===== ==================\n");
- for (j = 0; j < vui->vrings[q].qsz; j++)
+ for (j = 0; j < vui->vrings[q].qsz_mask + 1; j++)
{
u32 mem_hint = 0;
vlib_cli_output (vm,
};
/* *INDENT-ON* */
+/*
+ * CLI handler for "debug vhost-user <on | off>".
+ *
+ * Parses exactly one keyword from the input line and stores the result in
+ * vhost_user_main.debug, the run-time switch read by the DBG_SOCK() macro.
+ *
+ * Returns NULL on success, or a clib error for a missing/unknown argument.
+ */
+clib_error_t *
+debug_vhost_user_command_fn (vlib_main_t * vm,
+			     unformat_input_t * input,
+			     vlib_cli_command_t * cmd)
+{
+  unformat_input_t _line_input, *line_input = &_line_input;
+  clib_error_t *error = NULL;
+  vhost_user_main_t *vum = &vhost_user_main;
+  /* onoff: requested debug state; input_found: ensures only one keyword */
+  u8 onoff = 0;
+  u8 input_found = 0;
+
+  /* Get a line of input. */
+  if (!unformat_user (input, unformat_line_input, line_input))
+    return clib_error_return (0, "missing argument");
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      /* A keyword was already parsed; any further token is an error. */
+      if (input_found)
+	{
+	  error = clib_error_return (0, "unknown input `%U'",
+				     format_unformat_error, line_input);
+	  goto done;
+	}
+
+      if (unformat (line_input, "on"))
+	{
+	  input_found = 1;
+	  onoff = 1;
+	}
+      else if (unformat (line_input, "off"))
+	{
+	  input_found = 1;
+	  onoff = 0;
+	}
+      else
+	{
+	  error = clib_error_return (0, "unknown input `%U'",
+				     format_unformat_error, line_input);
+	  goto done;
+	}
+    }
+
+  /* NOTE(review): an empty argument line reaches here with onoff == 0 and
+   * silently disables debugging — confirm this default is intended. */
+  vum->debug = onoff;
+
+done:
+  unformat_free (line_input);
+
+  return error;
+}
+
+/* *INDENT-OFF* */
+/* Register the "debug vhost-user" CLI command, which toggles the
+ * run-time DBG_SOCK() logging via debug_vhost_user_command_fn. */
+VLIB_CLI_COMMAND (debug_vhost_user_command, static) = {
+    .path = "debug vhost-user",
+    .short_help = "debug vhost-user <on | off>",
+    .function = debug_vhost_user_command_fn,
+};
+/* *INDENT-ON* */
+
static clib_error_t *
vhost_user_config (vlib_main_t * vm, unformat_input_t * input)
{