X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fdevices%2Fvirtio%2Fvhost_user.c;h=cd37d4c59f8b19640f26826535b64a2292a88d1c;hb=2c77ae484;hp=7ea7cbef995754910e3f766c4530adeb4ccbb433;hpb=38071b1331b44746679997f6e66081c4936d087c;p=vpp.git

diff --git a/src/vnet/devices/virtio/vhost_user.c b/src/vnet/devices/virtio/vhost_user.c
index 7ea7cbef995..cd37d4c59f8 100644
--- a/src/vnet/devices/virtio/vhost_user.c
+++ b/src/vnet/devices/virtio/vhost_user.c
@@ -37,6 +37,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -116,40 +117,45 @@ unmap_all_mem_regions (vhost_user_intf_t * vui)
 }
 
 static_always_inline void
-vhost_user_tx_thread_placement (vhost_user_intf_t * vui)
+vhost_user_tx_thread_placement (vhost_user_intf_t *vui, u32 qid)
 {
-  //Let's try to assign one queue to each thread
-  u32 qid;
-  u32 thread_index = 0;
+  vnet_main_t *vnm = vnet_get_main ();
+  vhost_user_vring_t *rxvq = &vui->vrings[qid];
+  u32 q = qid >> 1, rxvq_count;
 
-  vui->use_tx_spinlock = 0;
-  while (1)
+  ASSERT ((qid & 1) == 0);
+  if (!rxvq->started || !rxvq->enabled)
+    return;
+
+  rxvq_count = (qid >> 1) + 1;
+  if (rxvq->queue_index == ~0)
     {
-      for (qid = 0; qid < vui->num_qid / 2; qid++)
-        {
-          vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
-          if (!rxvq->started || !rxvq->enabled)
-            continue;
-
-          vui->per_cpu_tx_qid[thread_index] = qid;
-          thread_index++;
-          if (thread_index == vlib_get_thread_main ()->n_vlib_mains)
-            return;
-        }
-      //We need to loop, meaning the spinlock has to be used
-      vui->use_tx_spinlock = 1;
-      if (thread_index == 0)
-        {
-          //Could not find a single valid one
-          for (thread_index = 0;
-               thread_index < vlib_get_thread_main ()->n_vlib_mains;
-               thread_index++)
-            {
-              vui->per_cpu_tx_qid[thread_index] = 0;
-            }
-          return;
-        }
+      rxvq->queue_index =
+        vnet_hw_if_register_tx_queue (vnm, vui->hw_if_index, q);
+      rxvq->qid = q;
+    }
+
+  FOR_ALL_VHOST_RXQ (q, vui)
+  {
+    vhost_user_vring_t *rxvq = &vui->vrings[q];
+    u32 qi = rxvq->queue_index;
+
+    if (rxvq->queue_index == ~0)
+      break;
+    for (u32 i = 0; i < vlib_get_n_threads (); i++)
+      vnet_hw_if_tx_queue_unassign_thread (vnm, qi, i);
+  }
+
+  for (u32 i = 0; i < vlib_get_n_threads (); i++)
+    {
+      vhost_user_vring_t *rxvq =
+        &vui->vrings[VHOST_VRING_IDX_RX (i % rxvq_count)];
+      u32 qi = rxvq->queue_index;
+
+      vnet_hw_if_tx_queue_assign_thread (vnm, qi, i);
     }
+
+  vnet_hw_if_update_runtime_data (vnm, vui->hw_if_index);
 }
 
 /**
@@ -243,7 +249,7 @@ vhost_user_thread_placement (vhost_user_intf_t * vui, u32 qid)
       vhost_user_rx_thread_placement (vui, qid);
     }
   else
-    vhost_user_tx_thread_placement (vui);
+    vhost_user_tx_thread_placement (vui, qid);
 }
 
 static clib_error_t *
@@ -1657,10 +1663,6 @@ vhost_user_vui_init (vnet_main_t * vnm, vhost_user_intf_t * vui,
   if (sw_if_index)
     *sw_if_index = vui->sw_if_index;
-
-  vec_validate (vui->per_cpu_tx_qid,
-                vlib_get_thread_main ()->n_vlib_mains - 1);
-  vhost_user_tx_thread_placement (vui);
 }
 
 int
@@ -2130,7 +2132,6 @@ show_vhost_user_command_fn (vlib_main_t * vm,
   u32 hw_if_index, *hw_if_indices = 0;
   vnet_hw_interface_t *hi;
   u16 qid;
-  u32 ci;
   int i, j, q;
   int show_descr = 0;
   int show_verbose = 0;
@@ -2263,13 +2264,20 @@ show_vhost_user_command_fn (vlib_main_t * vm,
                            txvq->mode);
         }
 
-      vlib_cli_output (vm, " tx placement: %s\n", vui->use_tx_spinlock ?
"spin-lock" : "lock-free"); + vlib_cli_output (vm, " tx placement\n"); - vec_foreach_index (ci, vui->per_cpu_tx_qid) + FOR_ALL_VHOST_RXQ (qid, vui) { - vlib_cli_output (vm, " thread %d on vring %d\n", ci, - VHOST_VRING_IDX_RX (vui->per_cpu_tx_qid[ci])); + vhost_user_vring_t *rxvq = &vui->vrings[qid]; + vnet_hw_if_tx_queue_t *txq; + + if (rxvq->queue_index == ~0) + continue; + txq = vnet_hw_if_get_tx_queue (vnm, rxvq->queue_index); + if (txq->threads) + vlib_cli_output (vm, " threads %U on vring %u: %s\n", + format_bitmap_list, txq->threads, qid, + txq->shared_queue ? "spin-lock" : "lock-free"); } vlib_cli_output (vm, "\n"); @@ -2302,9 +2310,8 @@ show_vhost_user_command_fn (vlib_main_t * vm, vlib_cli_output (vm, "\n Virtqueue %d (%s%s)\n", q, (q & 1) ? "RX" : "TX", vui->vrings[q].enabled ? "" : " disabled"); - if (q & 1) - vlib_cli_output (vm, " global RX queue index %u\n", - vui->vrings[q].queue_index); + vlib_cli_output (vm, " global %s queue index %u\n", + (q & 1) ? "RX" : "TX", vui->vrings[q].queue_index); vlib_cli_output ( vm, @@ -2336,23 +2343,25 @@ done: * * There are several parameters associated with a vHost interface: * - * - socket - Name of the linux socket used by hypervisor - * and VPP to manage the vHost interface. If in 'server' mode, VPP will - * create the socket if it does not already exist. If in 'client' mode, - * hypervisor will create the socket if it does not already exist. The VPP code - * is indifferent to the file location. However, if SELinux is enabled, then the - * socket needs to be created in '/var/run/vpp/'. + * - socket - Name of the linux socket used by + * hypervisor and VPP to manage the vHost interface. If in server + * mode, VPP will create the socket if it does not already exist. If in + * client mode, hypervisor will create the socket if it does not + * already exist. The VPP code is indifferent to the file location. However, + * if SELinux is enabled, then the socket needs to be created in + * /var/run/vpp/. * - * - server - Optional flag to indicate that VPP should be the server for - * the linux socket. If not provided, VPP will be the client. In 'server' - * mode, the VM can be reset without tearing down the vHost Interface. In - * 'client' mode, VPP can be reset without bringing down the VM and - * tearing down the vHost Interface. + * - server - Optional flag to indicate that VPP should be the server + * for the linux socket. If not provided, VPP will be the client. In + * server mode, the VM can be reset without tearing down the vHost + * Interface. In client mode, VPP can be reset without bringing down + * the VM and tearing down the vHost Interface. * - * - feature-mask - Optional virtio/vhost feature set negotiated at - * startup. This is intended for degugging only. It is recommended that this - * parameter not be used except by experienced users. By default, all supported - * features will be advertised. Otherwise, provide the set of features desired. + * - feature-mask - Optional virtio/vhost feature set negotiated + * at startup. This is intended for degugging only. It is recommended + * that this parameter not be used except by experienced users. By default, + * all supported features will be advertised. Otherwise, provide the set of + * features desired. * - 0x000008000 (15) - VIRTIO_NET_F_MRG_RXBUF * - 0x000020000 (17) - VIRTIO_NET_F_CTRL_VQ * - 0x000200000 (21) - VIRTIO_NET_F_GUEST_ANNOUNCE @@ -2366,18 +2375,21 @@ done: * - hwaddr - Optional ethernet address, can be in either * X:X:X:X:X:X unix or X.X.X cisco format. 
* - * - renumber - Optional parameter which allows the instance - * in the name to be specified. If instance already exists, name will be used - * anyway and multiple instances will have the same name. Use with caution. + * - renumber - Optional parameter which allows the + * instance in the name to be specified. If instance already exists, name + * will be used anyway and multiple instances will have the same name. Use + * with caution. * * @cliexpar - * Example of how to create a vhost interface with VPP as the client and all features enabled: + * Example of how to create a vhost interface with VPP as the client and all + * features enabled: * @cliexstart{create vhost-user socket /var/run/vpp/vhost1.sock} * VirtualEthernet0/0/0 * @cliexend - * Example of how to create a vhost interface with VPP as the server and with just - * multiple queues enabled: - * @cliexstart{create vhost-user socket /var/run/vpp/vhost2.sock server feature-mask 0x40400000} + * Example of how to create a vhost interface with VPP as the server and with + * just multiple queues enabled: + * @cliexstart{create vhost-user socket /var/run/vpp/vhost2.sock server + * feature-mask 0x40400000} * VirtualEthernet0/0/1 * @cliexend * Once the vHost interface is created, enable the interface using: @@ -2415,9 +2427,9 @@ VLIB_CLI_COMMAND (vhost_user_delete_command, static) = { /*? * Display the attributes of a single vHost User interface (provide interface - * name), multiple vHost User interfaces (provide a list of interface names seperated - * by spaces) or all Vhost User interfaces (omit an interface name to display all - * vHost interfaces). + * name), multiple vHost User interfaces (provide a list of interface names + * separated by spaces) or all Vhost User interfaces (omit an interface name + * to display all vHost interfaces). * * @cliexpar * @parblock @@ -2451,10 +2463,10 @@ VLIB_CLI_COMMAND (vhost_user_delete_command, static) = { * thread 2 on vring 0 * * Memory regions (total 2) - * region fd guest_phys_addr memory_size userspace_addr mmap_offset mmap_addr - * ====== ===== ================== ================== ================== ================== ================== - * 0 60 0x0000000000000000 0x00000000000a0000 0x00002aaaaac00000 0x0000000000000000 0x00002aab2b400000 - * 1 61 0x00000000000c0000 0x000000003ff40000 0x00002aaaaacc0000 0x00000000000c0000 0x00002aababcc0000 + * region fd guest_phys_addr memory_size userspace_addr mmap_offset mmap_addr + * ====== == =============== =========== ============== =========== ========== + * 0 60 0x00000000 0x000a0000 0xaac00000 0x00000000 0x2b400000 + * 1 61 0x000c0000 0x3ff40000 0xaacc0000 0x000c0000 0xabcc0000 * * Virtqueue 0 (TX) * qsz 256 last_avail_idx 0 last_used_idx 0 @@ -2498,8 +2510,9 @@ VLIB_CLI_COMMAND (vhost_user_delete_command, static) = { * * @cliexend * - * The optional 'descriptors' parameter will display the same output as - * the previous example but will include the descriptor table for each queue. + * The optional 'descriptors' parameter will display the same output + * as the previous example but will include the descriptor table for each + * queue. * The output is truncated below: * @cliexstart{show vhost-user VirtualEthernet0/0/0 descriptors} * Virtio vhost-user interfaces
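
For reference, the thread-to-queue policy that the reworked
vhost_user_tx_thread_placement() hands over to the generic tx-queue
infrastructure reduces to a round-robin modulo mapping: worker thread i
transmits on RX vring (i % rxvq_count). The following standalone C sketch is
not part of the patch; the thread and vring counts are made-up inputs, and
plain stdio stands in for the vnet_hw_if_* calls shown in the hunks above.
It only illustrates that mapping and why some queues end up shared:

/* Sketch only: models the round-robin assignment the new
 * vhost_user_tx_thread_placement() performs via
 * vnet_hw_if_tx_queue_assign_thread().  The counts below are assumptions
 * for illustration, not values taken from a running VPP instance. */
#include <stdio.h>

int
main (void)
{
  unsigned n_threads = 4;  /* stand-in for vlib_get_n_threads () */
  unsigned rxvq_count = 3; /* RX vrings that are started and enabled */

  for (unsigned i = 0; i < n_threads; i++)
    printf ("thread %u -> RX vring %u\n", i, i % rxvq_count);

  /* When n_threads > rxvq_count, at least one vring is picked by more than
   * one thread; the tx-queue layer marks such a queue as shared, which the
   * reworked "show vhost-user" output reports as "spin-lock" instead of
   * "lock-free". */
  return 0;
}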