/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include <sys/socket.h>
21 #include <vlib/vlib.h>
22 #include <vlib/unix/unix.h>
24 #include <vnet/vnet.h>
25 #include <vppinfra/vec.h>
26 #include <vppinfra/error.h>
27 #include <vppinfra/format.h>
29 #include <vnet/ethernet/ethernet.h>
30 #include <vnet/devices/dpdk/dpdk.h>
32 #include <vnet/devices/virtio/vhost-user.h>
/* Set to 1 to get a clib_warning for every vhost-user socket event. */
#define VHOST_USER_DEBUG_SOCKET 0

#if VHOST_USER_DEBUG_SOCKET == 1
#define DBG_SOCK(args...) clib_warning(args);
#else
/* Debugging disabled: DBG_SOCK compiles away to nothing. */
#define DBG_SOCK(args...)
#endif
42 static const char *vhost_message_str[] = {
43 [VHOST_USER_NONE] = "VHOST_USER_NONE",
44 [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
45 [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
46 [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
47 [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
48 [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
49 [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
50 [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
51 [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
52 [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
53 [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
54 [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
55 [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
56 [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
57 [VHOST_USER_SET_VRING_ERR] = "VHOST_USER_SET_VRING_ERR",
58 #if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
59 [VHOST_USER_GET_PROTOCOL_FEATURES] = "VHOST_USER_GET_PROTOCOL_FEATURES",
60 [VHOST_USER_SET_PROTOCOL_FEATURES] = "VHOST_USER_SET_PROTOCOL_FEATURES",
61 [VHOST_USER_GET_QUEUE_NUM] = "VHOST_USER_GET_QUEUE_NUM",
62 [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
/*
 * DPDK vhost-user functions
 */

/* portions taken from dpdk
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
105 qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
107 struct virtio_memory_regions *region;
108 uint64_t vhost_va = 0;
109 uint32_t regionidx = 0;
111 /* Find the region where the address lives. */
112 for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
113 region = &dev->mem->regions[regionidx];
114 if ((qemu_va >= region->userspace_address) &&
115 (qemu_va <= region->userspace_address +
116 region->memory_size)) {
117 vhost_va = qemu_va + region->guest_phys_address +
118 region->address_offset -
119 region->userspace_address;
126 static dpdk_device_t *
127 dpdk_vhost_user_device_from_hw_if_index(u32 hw_if_index)
129 vnet_main_t *vnm = vnet_get_main();
130 dpdk_main_t * dm = &dpdk_main;
131 vnet_hw_interface_t * hi = vnet_get_hw_interface (vnm, hw_if_index);
132 dpdk_device_t * xd = vec_elt_at_index (dm->devices, hi->dev_instance);
134 if (xd->dev_type != VNET_DPDK_DEV_VHOST_USER)
140 static dpdk_device_t *
141 dpdk_vhost_user_device_from_sw_if_index(u32 sw_if_index)
143 vnet_main_t *vnm = vnet_get_main();
144 vnet_sw_interface_t * sw = vnet_get_sw_interface (vnm, sw_if_index);
145 ASSERT (sw->type == VNET_SW_INTERFACE_TYPE_HARDWARE);
147 return dpdk_vhost_user_device_from_hw_if_index(sw->hw_if_index);
150 static void stop_processing_packets(u32 hw_if_index, u8 idx)
153 dpdk_vhost_user_device_from_hw_if_index(hw_if_index);
155 #if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
156 xd->vu_vhost_dev.virtqueue[idx]->enabled = 0;
158 xd->vu_is_running = 0;
162 static void disable_interface(dpdk_device_t * xd)
164 #if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
166 int numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
167 for (idx = 0; idx < numqs; idx++)
168 xd->vu_vhost_dev.virtqueue[idx]->enabled = 0;
171 xd->vu_is_running = 0;
174 static inline void * map_guest_mem(dpdk_device_t * xd, u64 addr)
176 dpdk_vu_intf_t * vui = xd->vu_intf;
177 struct virtio_memory * mem = xd->vu_vhost_dev.mem;
179 for (i=0; i<mem->nregions; i++) {
180 if ((mem->regions[i].guest_phys_address <= addr) &&
181 ((mem->regions[i].guest_phys_address + mem->regions[i].memory_size) > addr)) {
182 return (void *) (vui->region_addr[i] + addr - mem->regions[i].guest_phys_address);
185 DBG_SOCK("failed to map guest mem addr %lx", addr);
/*
 * Allocate (or recycle) a DPDK vhost-user device and register it as an
 * ethernet interface; the resulting hardware interface index is returned
 * via *hw_if_index.  if_id == ~0 requests the next free vhost-user id.
 *
 * NOTE(review): this listing appears to have elided lines — several
 * unbalanced braces/#if blocks and undeclared locals below (num_qpairs,
 * j, rnd, addr, next_cpu, q) — verify the exact control flow against the
 * complete upstream file before editing.
 */
static clib_error_t *
dpdk_create_vhost_user_if_internal (u32 * hw_if_index, u32 if_id)
  dpdk_main_t * dm = &dpdk_main;
  vlib_main_t * vm = vlib_get_main();
  vlib_thread_main_t * tm = vlib_get_thread_main();
  vnet_sw_interface_t * sw;
  clib_error_t * error;
  dpdk_device_and_queue_t * dq;

#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
  /* one RX/TX queue pair per RSS input thread, minimum one pair */
  num_qpairs = dm->use_rss < 1 ? 1 : dm->use_rss;

  dpdk_device_t * xd = NULL;
  /* stop worker threads while the device vector is mutated */
  vlib_worker_thread_barrier_sync (vm);

  int inactive_cnt = vec_len(dm->vu_inactive_interfaces_device_index);
  // if there are any inactive ifaces
  if (inactive_cnt > 0) {
    /* take the most recently deactivated device index and try to reuse it */
    u32 vui_idx = dm->vu_inactive_interfaces_device_index[inactive_cnt - 1];
    if (vec_len(dm->devices) > vui_idx) {
      xd = vec_elt_at_index (dm->devices, vui_idx);
      if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER) {
        DBG_SOCK("reusing inactive vhost-user interface sw_if_index %d", xd->vlib_sw_if_index);
      clib_warning("error: inactive vhost-user interface sw_if_index %d not VHOST_USER type!",
                   xd->vlib_sw_if_index);
      // reset so new interface is created

  // "remove" from inactive list
  _vec_len(dm->vu_inactive_interfaces_device_index) -= 1;

  // existing interface used - do not overwrite if_id if not needed
  if (if_id != (u32)~0)
    xd->vu_if_id = if_id;

  /* reinitialize every virtqueue of the recycled device; -1 marks
     "no fd yet" for kick/call/backend */
  for (j = 0; j < num_qpairs * VIRTIO_QNUM; j++) {
    memset(xd->vu_vhost_dev.virtqueue[j], 0, sizeof(struct vhost_virtqueue));
    xd->vu_vhost_dev.virtqueue[j]->kickfd = -1;
    xd->vu_vhost_dev.virtqueue[j]->callfd = -1;
    xd->vu_vhost_dev.virtqueue[j]->backend = -1;

  memset ((void *) xd->lockp, 0, CLIB_CACHE_LINE_BYTES);

  /* reset the per-worker TX staging vectors */
  for (j = 0; j < tm->n_vlib_mains; j++)
    vec_validate_ha (xd->tx_vectors[j], DPDK_TX_RING_SIZE,
                     sizeof(tx_ring_hdr_t), CLIB_CACHE_LINE_BYTES);
    vec_reset_length (xd->tx_vectors[j]);

  /* reset the per-queue RX vectors */
  for (j = 0; j < xd->rx_q_used; j++)
    vec_validate_aligned (xd->rx_vectors[j], VLIB_FRAME_SIZE-1,
                          CLIB_CACHE_LINE_BYTES);
    vec_reset_length (xd->rx_vectors[j]);

  // vui was not retrieved from inactive ifaces - create new
  vec_add2_aligned (dm->devices, xd, 1, CLIB_CACHE_LINE_BYTES);
  xd->dev_type = VNET_DPDK_DEV_VHOST_USER;
  xd->rx_q_used = num_qpairs;
  xd->tx_q_used = num_qpairs;
#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
  xd->vu_vhost_dev.virt_qp_nb = num_qpairs;

  vec_validate_aligned (xd->rx_vectors, xd->rx_q_used, CLIB_CACHE_LINE_BYTES);

  /* honor a caller-supplied id, otherwise allocate the next one */
  if (if_id == (u32)~0)
    xd->vu_if_id = dm->next_vu_if_id++;
    xd->vu_if_id = if_id;

  xd->device_index = xd - dm->devices;
  xd->per_interface_next_index = ~0;

  /* storage for the guest memory map (region table follows the header) */
  xd->vu_vhost_dev.mem = clib_mem_alloc (sizeof(struct virtio_memory) +
                                         VHOST_MEMORY_MAX_NREGIONS *
                                         sizeof(struct virtio_memory_regions));

  /* Will be set when guest sends VHOST_USER_SET_MEM_TABLE cmd */
  xd->vu_vhost_dev.mem->nregions = 0;

  /*
   * New virtqueue structure is an array of VHOST_MAX_QUEUE_PAIRS * 2
   * We need to allocate numq pairs.
   */
  for (j = 0; j < num_qpairs * VIRTIO_QNUM; j++) {
    xd->vu_vhost_dev.virtqueue[j] = clib_mem_alloc (sizeof(struct vhost_virtqueue));
    memset(xd->vu_vhost_dev.virtqueue[j], 0, sizeof(struct vhost_virtqueue));
    xd->vu_vhost_dev.virtqueue[j]->kickfd = -1;
    xd->vu_vhost_dev.virtqueue[j]->callfd = -1;
    xd->vu_vhost_dev.virtqueue[j]->backend = -1;

  /* fewer TX queues than input threads: TX path needs a spinlock */
  if (xd->tx_q_used < dm->input_cpu_count) {
    xd->lockp = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
                                        CLIB_CACHE_LINE_BYTES);
    memset ((void *) xd->lockp, 0, CLIB_CACHE_LINE_BYTES);

  DBG_SOCK("tm->n_vlib_mains: %d. TX %d, RX: %d, num_qpairs: %d, Lock: %p",
           tm->n_vlib_mains, xd->tx_q_used, xd->rx_q_used, num_qpairs, xd->lockp);

  vec_validate_aligned (xd->tx_vectors, tm->n_vlib_mains,
                        CLIB_CACHE_LINE_BYTES);

  for (j = 0; j < tm->n_vlib_mains; j++)
    vec_validate_ha (xd->tx_vectors[j], DPDK_TX_RING_SIZE,
                     sizeof(tx_ring_hdr_t), CLIB_CACHE_LINE_BYTES);
    vec_reset_length (xd->tx_vectors[j]);

  for (j = 0; j < xd->rx_q_used; j++)
    vec_validate_aligned (xd->rx_vectors[j], VLIB_FRAME_SIZE-1,
                          CLIB_CACHE_LINE_BYTES);
    vec_reset_length (xd->rx_vectors[j]);

  vec_validate_aligned (xd->frames, tm->n_vlib_mains,
                        CLIB_CACHE_LINE_BYTES);

  /*
   * Generate random MAC address for the interface
   */
  f64 now = vlib_time_now(vm);
  rnd = (u32) (now * 1e6);
  rnd = random_u32 (&rnd);
  /* random payload goes into bytes 2..5; the leading two bytes are
     presumably set in an elided line — TODO confirm upstream */
  memcpy (addr+2, &rnd, sizeof(rnd));

  error = ethernet_register_interface
    dpdk_device_class.index,
    /* ethernet address */ addr,
    &xd->vlib_hw_if_index,

  sw = vnet_get_hw_sw_interface (dm->vnet_main, xd->vlib_hw_if_index);
  xd->vlib_sw_if_index = sw->sw_if_index;

  /* per-interface vhost-user socket/vring state */
  xd->vu_intf = clib_mem_alloc (sizeof(*(xd->vu_intf)));

  *hw_if_index = xd->vlib_hw_if_index;

  DBG_SOCK("xd->device_index: %d, dm->input_cpu_count: %d, "
           "dm->input_cpu_first_index: %d\n", xd->device_index,
           dm->input_cpu_count, dm->input_cpu_first_index);

  /* round-robin the queue pairs over the DPDK input threads */
  for (q = 0; q < num_qpairs; q++) {
    int cpu = dm->input_cpu_first_index +
              (next_cpu % dm->input_cpu_count);

    unsigned lcore = vlib_worker_threads[cpu].dpdk_lcore_id;
    vec_validate(xd->cpu_socket_id_by_queue, q);
    xd->cpu_socket_id_by_queue[q] = rte_lcore_to_socket_id(lcore);

    vec_add2(dm->devices_by_cpu[cpu], dq, 1);
    dq->device = xd->device_index;

    DBG_SOCK("CPU for %d = %d. QID: %d", *hw_if_index, cpu, dq->queue_id);

    // start polling if it was not started yet (because of no phys ifaces)
    if (tm->n_vlib_mains == 1 && dpdk_input_node.state != VLIB_NODE_STATE_POLLING)
      vlib_node_set_state (vm, dpdk_input_node.index, VLIB_NODE_STATE_POLLING);

    if (tm->n_vlib_mains > 1 && tm->main_thread_is_io_node)
      vlib_node_set_state (vm, dpdk_io_input_node.index, VLIB_NODE_STATE_POLLING);

    if (tm->n_vlib_mains > 1 && !tm->main_thread_is_io_node)
      vlib_node_set_state (vlib_mains[cpu], dpdk_input_node.index,
                           VLIB_NODE_STATE_POLLING);

  vlib_worker_thread_barrier_release (vm);
400 #if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
401 static clib_error_t *
402 dpdk_vhost_user_set_protocol_features(u32 hw_if_index, u64 prot_features)
405 xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index);
407 xd->vu_vhost_dev.protocol_features = prot_features;
412 static clib_error_t *
413 dpdk_vhost_user_get_features(u32 hw_if_index, u64 * features)
415 *features = rte_vhost_feature_get();
417 DBG_SOCK("supported features: 0x%lx", *features);
421 static clib_error_t *
422 dpdk_vhost_user_set_features(u32 hw_if_index, u64 features)
425 u16 hdr_len = sizeof(struct virtio_net_hdr);
428 if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
429 clib_warning("not a vhost-user interface");
433 xd->vu_vhost_dev.features = features;
435 if (xd->vu_vhost_dev.features & (1 << VIRTIO_NET_F_MRG_RXBUF))
436 hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
438 int numqs = VIRTIO_QNUM;
440 #if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
441 numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
443 for (idx = 0; idx < numqs; idx++)
444 xd->vu_vhost_dev.virtqueue[idx]->vhost_hlen = hdr_len;
/*
 * VHOST_USER_SET_MEM_TABLE handler: mmap() each guest memory region fd
 * passed by qemu and record the guest-physical -> local-virtual offsets
 * used later by map_guest_mem()/qva_to_vva().
 *
 * NOTE(review): the declarations of xd and i and the `vui = xd->vu_intf;`
 * assignment appear elided from this listing, along with several closing
 * braces and the final return — verify against the complete upstream file.
 */
static clib_error_t *
dpdk_vhost_user_set_mem_table(u32 hw_if_index, vhost_user_memory_t * vum, int fd[])
  struct virtio_memory * mem;
  dpdk_vu_intf_t * vui;

  if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
    clib_warning("not a vhost-user interface");

  mem = xd->vu_vhost_dev.mem;
  mem->nregions = vum->nregions;

  for (i=0; i < mem->nregions; i++) {
    u64 mapped_size, mapped_address;

    mem->regions[i].guest_phys_address = vum->regions[i].guest_phys_addr;
    mem->regions[i].guest_phys_address_end = vum->regions[i].guest_phys_addr +
                                             vum->regions[i].memory_size;
    mem->regions[i].memory_size = vum->regions[i].memory_size;
    mem->regions[i].userspace_address = vum->regions[i].userspace_addr;

    /* map the region including the leading mmap_offset qemu requested */
    mapped_size = mem->regions[i].memory_size + vum->regions[i].mmap_offset;
    mapped_address = (uint64_t)(uintptr_t)mmap(NULL, mapped_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd[i], 0);

    if ((void *)mapped_address == MAP_FAILED)
      clib_warning("mmap error");

    /* skip past the offset to the region's first usable byte */
    mapped_address += vum->regions[i].mmap_offset;
    vui->region_addr[i] = mapped_address;
    vui->region_fd[i] = fd[i];
    /* delta used to translate guest-physical to our virtual addresses */
    mem->regions[i].address_offset = mapped_address - mem->regions[i].guest_phys_address;

    if (vum->regions[i].guest_phys_addr == 0) {
      mem->base_address = vum->regions[i].userspace_addr;
      mem->mapped_address = mem->regions[i].address_offset;

  /* rings reference guest memory: force re-enable after remapping */
  disable_interface(xd);
500 static clib_error_t *
501 dpdk_vhost_user_set_vring_num(u32 hw_if_index, u8 idx, u32 num)
504 struct vhost_virtqueue *vq;
506 DBG_SOCK("idx %u num %u", idx, num);
508 if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
509 clib_warning("not a vhost-user interface");
512 vq = xd->vu_vhost_dev.virtqueue[idx];
515 stop_processing_packets(hw_if_index, idx);
520 static clib_error_t *
521 dpdk_vhost_user_set_vring_addr(u32 hw_if_index, u8 idx, u64 desc, u64 used, u64 avail)
524 struct vhost_virtqueue *vq;
526 DBG_SOCK("idx %u desc 0x%lx used 0x%lx avail 0x%lx",
527 idx, desc, used, avail);
529 if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
530 clib_warning("not a vhost-user interface");
533 vq = xd->vu_vhost_dev.virtqueue[idx];
535 vq->desc = (struct vring_desc *) qva_to_vva(&xd->vu_vhost_dev, desc);
536 vq->used = (struct vring_used *) qva_to_vva(&xd->vu_vhost_dev, used);
537 vq->avail = (struct vring_avail *) qva_to_vva(&xd->vu_vhost_dev, avail);
539 if (!(vq->desc && vq->used && vq->avail)) {
540 clib_warning("falied to set vring addr");
543 stop_processing_packets(hw_if_index, idx);
/*
 * VHOST_USER_GET_VRING_BASE handler: report last_used_idx back to the
 * master and stop the ring — per the vhost-user protocol the slave stops
 * a ring upon receiving GET_VRING_BASE.
 *
 * NOTE(review): the xd declaration, several braces/returns and the #else
 * branch of the RTE_VERSION conditional appear elided from this listing —
 * verify against the complete upstream file.
 */
static clib_error_t *
dpdk_vhost_user_get_vring_base(u32 hw_if_index, u8 idx, u32 * num)
  struct vhost_virtqueue *vq;

  if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
    clib_warning("not a vhost-user interface");

  vq = xd->vu_vhost_dev.virtqueue[idx];
  /* hand the current used-ring position back to the master */
  *num = vq->last_used_idx;

  /*
   * Client must start ring upon receiving a kick
   * (that is, detecting that file descriptor is readable)
   * on the descriptor specified by VHOST_USER_SET_VRING_KICK,
   * and stop ring upon receiving VHOST_USER_GET_VRING_BASE.
   */
  dpdk_vu_intf_t *vui = xd->vu_intf;
  DBG_SOCK("Stopping vring Q %u of device %d", idx, hw_if_index);
#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
  vui->vrings[idx].enabled = 0; /* Reset local copy */
  vui->vrings[idx].callfd = -1; /* Reset FD */

  /* Check if all Qs are disabled */
  int numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
  for (idx = 0; idx < numqs; idx++) {
    if (xd->vu_vhost_dev.virtqueue[idx]->enabled)

  /* If all vrings are disabled then disable device */
    DBG_SOCK("Device %d disabled", hw_if_index);
    xd->vu_is_running = 0;

  xd->vu_is_running = 0;
601 static clib_error_t *
602 dpdk_vhost_user_set_vring_base(u32 hw_if_index, u8 idx, u32 num)
605 struct vhost_virtqueue *vq;
607 DBG_SOCK("idx %u num %u", idx, num);
609 if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
610 clib_warning("not a vhost-user interface");
614 vq = xd->vu_vhost_dev.virtqueue[idx];
615 vq->last_used_idx = num;
616 vq->last_used_idx_res = num;
618 stop_processing_packets(hw_if_index, idx);
/*
 * VHOST_USER_SET_VRING_KICK handler: after a kick fd arrives, re-evaluate
 * whether at least one RX/TX queue pair is fully set up, and if so mark
 * the device running and bring the link up (when admin-up).
 *
 * NOTE(review): the xd declaration, the #else branches of the nested
 * RTE_VERSION conditionals, the fd bookkeeping and several braces appear
 * elided from this listing — verify against the complete upstream file.
 */
static clib_error_t *
dpdk_vhost_user_set_vring_kick(u32 hw_if_index, u8 idx, int fd)
  dpdk_main_t * dm = &dpdk_main;
  dpdk_vu_vring *vring;
  struct vhost_virtqueue *vq0, *vq1, *vq;
  int index, vu_is_running = 0;

  if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
    clib_warning("not a vhost-user interface");

  vring = &xd->vu_intf->vrings[idx];
  vq = xd->vu_vhost_dev.virtqueue[idx];

#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
  /* ready only when addresses are set AND the guest enabled the ring */
  vq->enabled = (vq->desc && vq->avail && vq->used && vring->enabled) ? 1 : 0;

  /*
   * Set xd->vu_is_running if at least one pair of
   * RX/TX queues are enabled.
   */
  int numqs = VIRTIO_QNUM;
#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
  numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;

  /* queues come in RX/TX pairs: even index RX, odd index TX */
  for (index = 0; index < numqs; index += 2) {
    vq0 = xd->vu_vhost_dev.virtqueue[index]; /* RX */
    vq1 = xd->vu_vhost_dev.virtqueue[index + 1]; /* TX */
#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
    if (vq0->enabled && vq1->enabled)
    if (vq0->desc && vq0->avail && vq0->used &&
        vq1->desc && vq1->avail && vq1->used)

  DBG_SOCK("SET_VRING_KICK - idx %d, running %d, fd: %d",
           idx, vu_is_running, fd);

  xd->vu_is_running = vu_is_running;
  if (xd->vu_is_running && xd->admin_up) {
    vnet_hw_interface_set_flags (dm->vnet_main,
      xd->vlib_hw_if_index, VNET_HW_INTERFACE_FLAG_LINK_UP |
      ETH_LINK_FULL_DUPLEX );
681 #if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
682 int dpdk_vhost_user_set_vring_enable(u32 hw_if_index, u8 idx, int enable)
685 struct vhost_virtqueue *vq;
688 if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
689 clib_warning("not a vhost-user interface");
695 * Guest vhost driver wrongly enables queue before
696 * setting the vring address. Therefore, save a
697 * local copy. Reflect it in vq structure if addresses
698 * are set. If not, vq will be enabled when vring
701 vui->vrings[idx].enabled = enable; /* Save local copy */
703 vq = xd->vu_vhost_dev.virtqueue[idx];
704 if (vq->desc && vq->avail && vq->used)
705 xd->vu_vhost_dev.virtqueue[idx]->enabled = enable;
711 static clib_error_t * dpdk_vhost_user_callfd_read_ready (unix_file_t * uf)
713 __attribute__((unused)) int n;
715 n = read(uf->file_descriptor, ((char*)&buff), 8);
719 static clib_error_t *
720 dpdk_vhost_user_set_vring_call(u32 hw_if_index, u8 idx, int fd)
723 struct vhost_virtqueue *vq;
724 unix_file_t template = {0};
726 DBG_SOCK("SET_VRING_CALL - idx %d, fd %d", idx, fd);
728 if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
729 clib_warning("not a vhost-user interface");
733 dpdk_vu_intf_t *vui = xd->vu_intf;
735 /* if there is old fd, delete it */
736 if (vui->vrings[idx].callfd > 0) {
737 unix_file_t * uf = pool_elt_at_index (unix_main.file_pool,
738 vui->vrings[idx].callfd_idx);
739 unix_file_del (&unix_main, uf);
741 vui->vrings[idx].callfd = fd;
742 template.read_function = dpdk_vhost_user_callfd_read_ready;
743 template.file_descriptor = fd;
744 vui->vrings[idx].callfd_idx = unix_file_add (&unix_main, &template);
746 vq = xd->vu_vhost_dev.virtqueue[idx];
747 vq->callfd = -1; /* We use locally saved vring->callfd; */
753 dpdk_vhost_user_want_interrupt(dpdk_device_t *xd, int idx)
755 dpdk_vu_intf_t *vui = xd->vu_intf;
758 if (PREDICT_FALSE(vui->num_vrings <= 0))
761 dpdk_vu_vring *vring = &(vui->vrings[idx]);
762 struct vhost_virtqueue *vq = xd->vu_vhost_dev.virtqueue[idx];
764 /* return if vm is interested in interrupts */
765 return (vring->callfd > 0) && !(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
769 dpdk_vhost_user_send_interrupt(vlib_main_t * vm, dpdk_device_t * xd, int idx)
771 dpdk_main_t * dm = &dpdk_main;
772 dpdk_vu_intf_t *vui = xd->vu_intf;
775 if (PREDICT_FALSE(vui->num_vrings <= 0))
778 dpdk_vu_vring *vring = &(vui->vrings[idx]);
779 struct vhost_virtqueue *vq = xd->vu_vhost_dev.virtqueue[idx];
781 /* if vm is interested in interrupts */
782 if((vring->callfd > 0) && !(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
783 eventfd_write(vring->callfd, (eventfd_t)1);
784 vring->n_since_last_int = 0;
785 vring->int_deadline = vlib_time_now(vm) + dm->vhost_coalesce_time;
/*
 * vhost-user interface management functions
 */

// initialize vui with specified attributes
//
// NOTE(review): the return-type line and at least one trailing parameter
// (the sw_if_index out-param written at the bottom) appear elided from
// this listing — verify the full signature against the upstream file.
dpdk_vhost_user_vui_init(vnet_main_t * vnm,
                         dpdk_device_t *xd, int sockfd,
                         const char * sock_filename,
                         u8 is_server, u64 feature_mask,
  dpdk_vu_intf_t *vui = xd->vu_intf;
  /* start from a clean slate for (re)connection */
  memset(vui, 0, sizeof(*vui));

  vui->unix_fd = sockfd;
#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
  vui->num_vrings = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
  /* NOTE(review): pre-2.2 branch; the #else/#endif lines appear elided */
  vui->num_vrings = VIRTIO_QNUM;
  DBG_SOCK("dpdk_vhost_user_vui_init VRINGS: %d", vui->num_vrings);
  vui->sock_is_server = is_server;
  /* bounded copy; vui was zeroed above so the string stays NUL-terminated */
  strncpy(vui->sock_filename, sock_filename, ARRAY_LEN(vui->sock_filename)-1);
  vui->feature_mask = feature_mask;
  vui->unix_file_index = ~0;

  /* start with link down until the guest connects and kicks a ring */
  vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);

    *sw_if_index = xd->vlib_sw_if_index;
825 // register vui and start polling on it
827 dpdk_vhost_user_vui_register(vlib_main_t * vm, dpdk_device_t *xd)
829 dpdk_main_t * dm = &dpdk_main;
830 dpdk_vu_intf_t *vui = xd->vu_intf;
832 hash_set (dm->vu_sw_if_index_by_listener_fd, vui->unix_fd,
833 xd->vlib_sw_if_index);
/*
 * Tear down the socket side of a vhost-user interface: bring the link
 * down, remove the unix event-loop watcher and drop the fd to
 * sw_if_index hash mappings.
 *
 * NOTE(review): the return-type line and some teardown steps (e.g. any
 * close() of vui->unix_fd) appear elided from this listing — verify
 * against the complete upstream file.
 */
dpdk_vhost_user_if_disconnect(dpdk_device_t * xd)
  dpdk_vu_intf_t *vui = xd->vu_intf;
  vnet_main_t * vnm = vnet_get_main();
  dpdk_main_t * dm = &dpdk_main;

  /* link down while disconnected */
  vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);

  if (vui->unix_file_index != ~0) {
    unix_file_del (&unix_main, unix_main.file_pool + vui->unix_file_index);
    vui->unix_file_index = ~0;

  /* the fd may be registered as either a data socket or a listener */
  hash_unset(dm->vu_sw_if_index_by_sock_fd, vui->unix_fd);
  hash_unset(dm->vu_sw_if_index_by_listener_fd, vui->unix_fd);

  DBG_SOCK("interface ifindex %d disconnected", xd->vlib_sw_if_index);
/*
 * unix_file read callback for a connected vhost-user socket: receive one
 * protocol message (fixed header, optional SCM_RIGHTS file descriptors,
 * then msg.size bytes of payload), dispatch on msg.request, and send a
 * reply when the request demands one.  On protocol errors the socket is
 * disconnected (bottom of the function).
 *
 * NOTE(review): several declarations and statements appear elided from
 * this listing (mh, iov, n, p, xd, vui, q, many braces, `break`s and
 * `goto close_socket` jumps) — verify the exact control flow against the
 * complete upstream file before editing.
 */
static clib_error_t * dpdk_vhost_user_socket_read (unix_file_t * uf)
  int fd, number_of_fds = 0;
  int fds[VHOST_MEMORY_MAX_NREGIONS];
  vhost_user_msg_t msg;
  dpdk_main_t * dm = &dpdk_main;
  struct cmsghdr *cmsg;
  vnet_main_t * vnm = vnet_get_main();

  /* map the socket fd back to its interface */
  p = hash_get (dm->vu_sw_if_index_by_sock_fd, uf->file_descriptor);
    DBG_SOCK ("FD %d doesn't belong to any interface",
              uf->file_descriptor);

  xd = dpdk_vhost_user_device_from_sw_if_index(p[0]);

  /* ancillary buffer sized for one fd per possible memory region */
  char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))];

  memset(&mh, 0, sizeof(mh));
  memset(control, 0, sizeof(control));

  /* set the payload */
  iov[0].iov_base = (void *) &msg;
  iov[0].iov_len = VHOST_USER_MSG_HDR_SZ;

  mh.msg_control = control;
  mh.msg_controllen = sizeof(control);

  /* read the fixed-size header first; msg.size gives the payload length */
  n = recvmsg(uf->file_descriptor, &mh, 0);

  if (n != VHOST_USER_MSG_HDR_SZ)

  if (mh.msg_flags & MSG_CTRUNC) {

  cmsg = CMSG_FIRSTHDR(&mh);

  /* collect any file descriptors passed via SCM_RIGHTS */
  if (cmsg && (cmsg->cmsg_len > 0) && (cmsg->cmsg_level == SOL_SOCKET) &&
      (cmsg->cmsg_type == SCM_RIGHTS) &&
      (cmsg->cmsg_len - CMSG_LEN(0) <= VHOST_MEMORY_MAX_NREGIONS * sizeof(int))) {
    number_of_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
    memcpy(fds, CMSG_DATA(cmsg), number_of_fds * sizeof(int));

  /* version 1, no reply bit set */
  if ((msg.flags & 7) != 1) {
    DBG_SOCK("malformed message received. closing socket");

  int rv __attribute__((unused));
  /* $$$$ pay attention to rv */
  rv = read(uf->file_descriptor, ((char*)&msg) + n, msg.size);

  DBG_SOCK("VPP VHOST message %s", vhost_message_str[msg.request]);
  switch (msg.request) {
  case VHOST_USER_GET_FEATURES:
    DBG_SOCK("if %d msg VHOST_USER_GET_FEATURES",
             xd->vlib_hw_if_index);

    msg.flags |= VHOST_USER_REPLY_MASK;

    dpdk_vhost_user_get_features(xd->vlib_hw_if_index, &msg.u64);
    /* NOTE(review): vui declaration/assignment elided above */
    msg.u64 &= vui->feature_mask;
    msg.size = sizeof(msg.u64);

  case VHOST_USER_SET_FEATURES:
    DBG_SOCK("if %d msg VHOST_USER_SET_FEATURES features 0x%016lx",
             xd->vlib_hw_if_index, msg.u64);

    dpdk_vhost_user_set_features(xd->vlib_hw_if_index, msg.u64);

  case VHOST_USER_SET_MEM_TABLE:
    DBG_SOCK("if %d msg VHOST_USER_SET_MEM_TABLE nregions %d",
             xd->vlib_hw_if_index, msg.memory.nregions);

    if ((msg.memory.nregions < 1) ||
        (msg.memory.nregions > VHOST_MEMORY_MAX_NREGIONS)) {

      DBG_SOCK("number of mem regions must be between 1 and %i",
               VHOST_MEMORY_MAX_NREGIONS);

    /* one SCM_RIGHTS fd is required per memory region */
    if (msg.memory.nregions != number_of_fds) {
      DBG_SOCK("each memory region must have FD");

    dpdk_vhost_user_set_mem_table(xd->vlib_hw_if_index, &msg.memory, fds);

  case VHOST_USER_SET_VRING_NUM:
    DBG_SOCK("if %d msg VHOST_USER_SET_VRING_NUM idx %d num %d",
             xd->vlib_hw_if_index, msg.state.index, msg.state.num);

    if ((msg.state.num > 32768) || /* maximum ring size is 32768 */
        (msg.state.num == 0) ||    /* it cannot be zero */
        (msg.state.num % 2))       /* NOTE(review): only rejects odd sizes;
                                      original comment claimed "power of 2" */

    dpdk_vhost_user_set_vring_num(xd->vlib_hw_if_index, msg.state.index, msg.state.num);

  case VHOST_USER_SET_VRING_ADDR:
    DBG_SOCK("if %d msg VHOST_USER_SET_VRING_ADDR idx %d",
             xd->vlib_hw_if_index, msg.state.index);

    dpdk_vhost_user_set_vring_addr(xd->vlib_hw_if_index, msg.state.index,
                                   msg.addr.desc_user_addr,
                                   msg.addr.used_user_addr,
                                   msg.addr.avail_user_addr);

  case VHOST_USER_SET_OWNER:
    DBG_SOCK("if %d msg VHOST_USER_SET_OWNER",
             xd->vlib_hw_if_index);

  case VHOST_USER_RESET_OWNER:
    DBG_SOCK("if %d msg VHOST_USER_RESET_OWNER",
             xd->vlib_hw_if_index);

  case VHOST_USER_SET_VRING_CALL:
    /* low byte carries the queue index, bit 8 the "invalid fd" flag */
    q = (u8) (msg.u64 & 0xFF);

    DBG_SOCK("if %d msg VHOST_USER_SET_VRING_CALL u64 %lx, idx: %d",
             xd->vlib_hw_if_index, msg.u64, q);

    if (!(msg.u64 & 0x100))
      if (number_of_fds != 1)

    dpdk_vhost_user_set_vring_call(xd->vlib_hw_if_index, q, fd);

  case VHOST_USER_SET_VRING_KICK:

    q = (u8) (msg.u64 & 0xFF);

    DBG_SOCK("if %d msg VHOST_USER_SET_VRING_KICK u64 %lx, idx: %d",
             xd->vlib_hw_if_index, msg.u64, q);

    if (!(msg.u64 & 0x100))
      if (number_of_fds != 1)

        vui->vrings[q].kickfd = fds[0];

      vui->vrings[q].kickfd = -1;

    dpdk_vhost_user_set_vring_kick(xd->vlib_hw_if_index, q, vui->vrings[q].kickfd);

  case VHOST_USER_SET_VRING_ERR:

    q = (u8) (msg.u64 & 0xFF);

    DBG_SOCK("if %d msg VHOST_USER_SET_VRING_ERR u64 %lx, idx: %d",
             xd->vlib_hw_if_index, msg.u64, q);

    if (!(msg.u64 & 0x100))
      if (number_of_fds != 1)

    vui->vrings[q].errfd = fd;

  case VHOST_USER_SET_VRING_BASE:
    DBG_SOCK("if %d msg VHOST_USER_SET_VRING_BASE idx %d num %d",
             xd->vlib_hw_if_index, msg.state.index, msg.state.num);

    dpdk_vhost_user_set_vring_base(xd->vlib_hw_if_index, msg.state.index, msg.state.num);

  case VHOST_USER_GET_VRING_BASE:
    DBG_SOCK("if %d msg VHOST_USER_GET_VRING_BASE idx %d num %d",
             xd->vlib_hw_if_index, msg.state.index, msg.state.num);

    msg.flags |= VHOST_USER_REPLY_MASK;
    msg.size = sizeof(msg.state);

    dpdk_vhost_user_get_vring_base(xd->vlib_hw_if_index, msg.state.index, &msg.state.num);

  case VHOST_USER_NONE:
    DBG_SOCK("if %d msg VHOST_USER_NONE",
             xd->vlib_hw_if_index);

  case VHOST_USER_SET_LOG_BASE:
    DBG_SOCK("if %d msg VHOST_USER_SET_LOG_BASE",
             xd->vlib_hw_if_index);

  case VHOST_USER_SET_LOG_FD:
    DBG_SOCK("if %d msg VHOST_USER_SET_LOG_FD",
             xd->vlib_hw_if_index);

#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
  case VHOST_USER_GET_PROTOCOL_FEATURES:
    DBG_SOCK("if %d msg VHOST_USER_GET_PROTOCOL_FEATURES",
             xd->vlib_hw_if_index);

    msg.flags |= VHOST_USER_REPLY_MASK;
    msg.u64 = VHOST_USER_PROTOCOL_FEATURES;
    DBG_SOCK("VHOST_USER_PROTOCOL_FEATURES: %llx", VHOST_USER_PROTOCOL_FEATURES);
    msg.size = sizeof(msg.u64);

  case VHOST_USER_SET_PROTOCOL_FEATURES:
    DBG_SOCK("if %d msg VHOST_USER_SET_PROTOCOL_FEATURES",
             xd->vlib_hw_if_index);

    DBG_SOCK("VHOST_USER_SET_PROTOCOL_FEATURES: 0x%lx",
    dpdk_vhost_user_set_protocol_features(xd->vlib_hw_if_index,

  case VHOST_USER_SET_VRING_ENABLE:
    DBG_SOCK("%d VPP VHOST_USER_SET_VRING_ENABLE IDX: %d, Enable: %d",
             xd->vlib_hw_if_index, msg.state.index, msg.state.num);
    dpdk_vhost_user_set_vring_enable
      (xd->vlib_hw_if_index, msg.state.index, msg.state.num);

  case VHOST_USER_GET_QUEUE_NUM:
    DBG_SOCK("if %d msg VHOST_USER_GET_QUEUE_NUM:",
             xd->vlib_hw_if_index);

    msg.flags |= VHOST_USER_REPLY_MASK;
    msg.u64 = xd->vu_vhost_dev.virt_qp_nb;
    msg.size = sizeof(msg.u64);

    DBG_SOCK("unknown vhost-user message %d received. closing socket",

  /* if we have pointers to descriptor table, go up*/
    xd->vu_vhost_dev.virtqueue[VHOST_NET_VRING_IDX_TX]->desc &&
    xd->vu_vhost_dev.virtqueue[VHOST_NET_VRING_IDX_RX]->desc) {

    DBG_SOCK("interface %d connected", xd->vlib_sw_if_index);

    vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, VNET_HW_INTERFACE_FLAG_LINK_UP);

  /* if we need to reply */
  if (msg.flags & VHOST_USER_REPLY_MASK)

    n = send(uf->file_descriptor, &msg, VHOST_USER_MSG_HDR_SZ + msg.size, 0);
    if (n != (msg.size + VHOST_USER_MSG_HDR_SZ))

  /* error path: tear the connection down */
  DBG_SOCK("error: close_socket");
  dpdk_vhost_user_if_disconnect(xd);
1167 static clib_error_t * dpdk_vhost_user_socket_error (unix_file_t * uf)
1169 dpdk_main_t * dm = &dpdk_main;
1173 p = hash_get (dm->vu_sw_if_index_by_sock_fd, uf->file_descriptor);
1175 DBG_SOCK ("FD %d doesn't belong to any interface",
1176 uf->file_descriptor);
1180 xd = dpdk_vhost_user_device_from_sw_if_index(p[0]);
1182 dpdk_vhost_user_if_disconnect(xd);
/*
 * unix_file read callback on a listening server socket: accept the qemu
 * client connection and register a message-read watcher for it.
 *
 * NOTE(review): the `p` declaration, the `vui = xd->vu_intf;` assignment
 * and several braces/returns appear elided from this listing — vui is
 * used below without a visible assignment; verify against the complete
 * upstream file.
 */
static clib_error_t * dpdk_vhost_user_socksvr_accept_ready (unix_file_t * uf)
  int client_fd, client_len;
  struct sockaddr_un client;
  unix_file_t template = {0};
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd = NULL;
  dpdk_vu_intf_t * vui;

  /* map the listener fd back to its interface */
  p = hash_get (dm->vu_sw_if_index_by_listener_fd,
                uf->file_descriptor);
    DBG_SOCK ("fd %d doesn't belong to any interface",
              uf->file_descriptor);

  xd = dpdk_vhost_user_device_from_sw_if_index(p[0]);

  client_len = sizeof(client);
  client_fd = accept (uf->file_descriptor,
                      (struct sockaddr *)&client,
                      (socklen_t *)&client_len);

    return clib_error_return_unix (0, "accept");

  /* watch the accepted socket for vhost-user protocol messages */
  template.read_function = dpdk_vhost_user_socket_read;
  template.error_function = dpdk_vhost_user_socket_error;
  template.file_descriptor = client_fd;
  vui->unix_file_index = unix_file_add (&unix_main, &template);

  vui->client_fd = client_fd;
  hash_set (dm->vu_sw_if_index_by_sock_fd, vui->client_fd,
            xd->vlib_sw_if_index);
1228 // init server socket on specified sock_filename
/*
 * Creates, binds and listens on an AF_UNIX stream socket at
 * sock_filename, then registers it with the unix poller using
 * dpdk_vhost_user_socksvr_accept_ready.  Returns 0 or a
 * VNET_API_ERROR_SYSCALL_ERROR_* code; *sockfd receives the fd
 * (presumably — assignment not visible in this excerpt).
 */
1229 static int dpdk_vhost_user_init_server_sock(const char * sock_filename, int *sockfd)
1232 struct sockaddr_un un;
1234 /* create listening socket */
1235 fd = socket(AF_UNIX, SOCK_STREAM, 0);
1238 return VNET_API_ERROR_SYSCALL_ERROR_1;
1241 un.sun_family = AF_UNIX;
/* NOTE(review): unbounded strcpy into sun_path (typically 108 bytes);
 * an over-long sock_filename overflows un — should be length-checked */
1242 strcpy((char *) un.sun_path, (char *) sock_filename);
1244 /* remove if exists */
1245 unlink( (char *) sock_filename);
/* NOTE(review): len = strlen(path) + strlen(filename) double-counts the
 * name (path == filename here); conventional form is
 * offsetof(struct sockaddr_un, sun_path) + strlen(path) — confirm intent */
1247 len = strlen((char *) un.sun_path) + strlen((char *) sock_filename);
1249 if (bind(fd, (struct sockaddr *) &un, len) == -1) {
1250 rv = VNET_API_ERROR_SYSCALL_ERROR_2;
/* backlog of 1: only a single QEMU client is expected per socket */
1254 if (listen(fd, 1) == -1) {
1255 rv = VNET_API_ERROR_SYSCALL_ERROR_3;
/* register listener with the poller; accepts arrive asynchronously */
1259 unix_file_t template = {0};
1260 template.read_function = dpdk_vhost_user_socksvr_accept_ready;
1261 template.file_descriptor = fd;
1262 unix_file_add (&unix_main, &template);
1272 * vhost-user interface control functions used from vpe api
/*
 * API entry point: create a DPDK vhost-user interface.
 * When is_server is set, a listening socket is created at sock_filename;
 * otherwise the process loop connects as a client later.
 * Delegates wholesale to the virtio implementation when
 * dm->use_virtio_vhost is configured.
 * NOTE(review): excerpt omits several parameter lines (is_server,
 * sw_if_index, feature_mask) and intermediate declarations.
 */
1275 int dpdk_vhost_user_create_if(vnet_main_t * vnm, vlib_main_t * vm,
1276 const char * sock_filename,
1280 u8 renumber, u32 custom_dev_instance)
1282 dpdk_main_t * dm = &dpdk_main;
1288 // using virtio vhost user?
1289 if (dm->use_virtio_vhost) {
1290 return vhost_user_create_if(vnm, vm, sock_filename, is_server,
1291 sw_if_index, feature_mask, renumber, custom_dev_instance);
/* server mode: set up the listening socket first so failure aborts early */
1295 if ((rv = dpdk_vhost_user_init_server_sock (sock_filename, &sockfd)) != 0) {
1301 // set next vhost-user if id if custom one is higher or equal
1302 if (custom_dev_instance >= dm->next_vu_if_id)
1303 dm->next_vu_if_id = custom_dev_instance + 1;
/* renumber path uses the caller-chosen instance; default path uses ~0 */
1305 dpdk_create_vhost_user_if_internal(&hw_if_idx, custom_dev_instance);
1307 dpdk_create_vhost_user_if_internal(&hw_if_idx, (u32)~0);
1308 DBG_SOCK("dpdk vhost-user interface created hw_if_index %d", hw_if_idx);
1310 xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_idx);
/* initialize per-interface vhost-user state, then register with the
 * process node that drives (re)connection */
1313 dpdk_vhost_user_vui_init (vnm, xd, sockfd, sock_filename, is_server,
1314 feature_mask, sw_if_index);
1316 dpdk_vhost_user_vui_register (vm, xd);
/*
 * API entry point: re-point an existing vhost-user interface at a
 * (possibly new) socket/params.  Disconnects the current session,
 * re-creates the server socket if requested, re-initializes state and
 * optionally renumbers the interface.
 * NOTE(review): excerpt omits parameter lines and error-path bodies.
 */
1320 int dpdk_vhost_user_modify_if(vnet_main_t * vnm, vlib_main_t * vm,
1321 const char * sock_filename,
1325 u8 renumber, u32 custom_dev_instance)
1327 dpdk_main_t * dm = &dpdk_main;
1329 dpdk_vu_intf_t * vui = NULL;
1334 // using virtio vhost user?
1335 if (dm->use_virtio_vhost) {
1336 return vhost_user_modify_if(vnm, vm, sock_filename, is_server,
1337 sw_if_index, feature_mask, renumber, custom_dev_instance);
1340 xd = dpdk_vhost_user_device_from_sw_if_index(sw_if_index);
1343 return VNET_API_ERROR_INVALID_SW_IF_INDEX;
1347 // interface is inactive
1349 // disconnect interface sockets
1350 dpdk_vhost_user_if_disconnect(xd);
/* server mode: recreate the listening socket at the new path */
1353 if ((rv = dpdk_vhost_user_init_server_sock (sock_filename, &sockfd)) != 0) {
1358 dpdk_vhost_user_vui_init (vnm, xd, sockfd, sock_filename, is_server,
1359 feature_mask, &sw_if_idx);
/* apply caller-requested custom device instance number */
1362 vnet_interface_name_renumber (sw_if_idx, custom_dev_instance);
1365 dpdk_vhost_user_vui_register (vm, xd);
/*
 * API entry point: delete (deactivate) a vhost-user interface.
 * The device slot is not destroyed — it is disconnected, pushed onto
 * the inactive list for reuse, and its ethernet interface removed.
 */
1370 int dpdk_vhost_user_delete_if(vnet_main_t * vnm, vlib_main_t * vm,
1373 dpdk_main_t * dm = &dpdk_main;
1374 dpdk_device_t * xd = NULL;
1375 dpdk_vu_intf_t * vui;
1378 // using virtio vhost user?
1379 if (dm->use_virtio_vhost) {
1380 return vhost_user_delete_if(vnm, vm, sw_if_index);
1383 xd = dpdk_vhost_user_device_from_sw_if_index(sw_if_index);
1386 return VNET_API_ERROR_INVALID_SW_IF_INDEX;
1390 // interface is inactive
1392 // disconnect interface sockets
1393 dpdk_vhost_user_if_disconnect(xd);
1394 // add to inactive interface list
1395 vec_add1 (dm->vu_inactive_interfaces_device_index, xd->device_index);
/* remove the ethernet interface; device_index can be recycled later */
1397 ethernet_delete_interface (vnm, xd->vlib_hw_if_index);
1398 DBG_SOCK ("deleted (deactivated) vhost-user interface sw_if_index %d", sw_if_index);
/*
 * API entry point: collect details for every active vhost-user
 * interface into a newly-allocated vector returned via *out_vuids.
 * Caller owns (and must vec_free) the returned vector.
 */
1403 int dpdk_vhost_user_dump_ifs(vnet_main_t * vnm, vlib_main_t * vm, vhost_user_intf_details_t **out_vuids)
1406 dpdk_main_t * dm = &dpdk_main;
1408 dpdk_vu_intf_t * vui;
1409 struct virtio_net * vhost_dev;
1410 vhost_user_intf_details_t * r_vuids = NULL;
1411 vhost_user_intf_details_t * vuid = NULL;
1412 u32 * hw_if_indices = 0;
1413 vnet_hw_interface_t * hi;
1420 // using virtio vhost user?
1421 if (dm->use_virtio_vhost) {
1422 return vhost_user_dump_ifs(vnm, vm, out_vuids);
/* pass 1: gather hw_if_index of every active vhost-user device */
1425 vec_foreach (xd, dm->devices) {
1426 if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER &&
1427 xd->vu_intf->active)
1428 vec_add1(hw_if_indices, xd->vlib_hw_if_index);
/* pass 2: fill one vhost_user_intf_details_t per gathered interface */
1431 for (i = 0; i < vec_len (hw_if_indices); i++) {
1432 hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
1433 xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_indices[i]);
1435 clib_warning("invalid vhost-user interface hw_if_index %d", hw_if_indices[i]);
1440 ASSERT(vui != NULL);
1441 vhost_dev = &xd->vu_vhost_dev;
/* header size is only meaningful once at least one vring is set up */
1442 u32 virtio_net_hdr_sz = (vui->num_vrings > 0 ?
1443 vhost_dev->virtqueue[0]->vhost_hlen : 0);
1445 vec_add2(r_vuids, vuid, 1);
1446 vuid->sw_if_index = xd->vlib_sw_if_index;
1447 vuid->virtio_net_hdr_sz = virtio_net_hdr_sz;
1448 vuid->features = vhost_dev->features;
1449 vuid->is_server = vui->sock_is_server;
/* mem is NULL until SET_MEM_TABLE has been processed */
1450 vuid->num_regions = (vhost_dev->mem != NULL ? vhost_dev->mem->nregions : 0);
1451 vuid->sock_errno = vui->sock_errno;
/* NOTE(review): strncpy does not guarantee NUL-termination; correctness
 * relies on the destination buffers being zeroed beforehand — confirm */
1452 strncpy((char *)vuid->sock_filename, (char *)vui->sock_filename,
1453 ARRAY_LEN(vuid->sock_filename)-1);
/* format with trailing %c,0 yields a NUL-terminated C string from hi->name */
1455 s = format (s, "%v%c", hi->name, 0);
1457 strncpy((char *)vuid->if_name, (char *)s,
1458 ARRAY_LEN(vuid->if_name)-1);
1463 vec_free (hw_if_indices);
1465 *out_vuids = r_vuids;
 * Processing functions called from dpdk process fn
/*
 * Per-process-loop connection state shared across
 * dpdk_vhost_user_process_{init,if,cleanup}.
 * NOTE(review): the "typedef struct {" opener and some members (sockfd,
 * event_data — both used by process_init) are not visible in this excerpt.
 */
1475 struct sockaddr_un sun;
1477 unix_file_t template;
1479 } dpdk_vu_process_state;
/*
 * Allocate and initialize the process-loop state (*ctx): one spare
 * client socket ready to connect, plus the unix_file template used
 * when a connection succeeds.
 */
1481 void dpdk_vhost_user_process_init (void **ctx)
1483 dpdk_vu_process_state *state = clib_mem_alloc (sizeof(dpdk_vu_process_state));
1484 memset(state, 0, sizeof(*state));
/* NOTE(review): socket() return value is not checked here; a failure
 * leaves sockfd == -1 to be handled (or not) by process_if — confirm */
1485 state->sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
1486 state->sun.sun_family = AF_UNIX;
1487 state->template.read_function = dpdk_vhost_user_socket_read;
1488 state->template.error_function = dpdk_vhost_user_socket_error;
1489 state->event_data = 0;
/* Tear down the process-loop state allocated by process_init
 * (body not visible in this excerpt). */
1493 void dpdk_vhost_user_process_cleanup (void *ctx)
/*
 * Periodic per-interface work from the dpdk process node: for
 * client-mode interfaces, attempt connection when disconnected and
 * health-check the socket when connected.
 */
1498 uword dpdk_vhost_user_process_if (vlib_main_t *vm, dpdk_device_t *xd, void *ctx)
1500 dpdk_main_t * dm = &dpdk_main;
1501 dpdk_vu_process_state *state = (dpdk_vu_process_state *)ctx;
1502 dpdk_vu_intf_t *vui = xd->vu_intf;
/* server-mode sockets are driven by accept(); inactive ifs are skipped */
1504 if (vui->sock_is_server || !vui->active)
1507 if (vui->unix_fd == -1) {
1508 /* try to connect */
1509 strncpy(state->sun.sun_path, (char *) vui->sock_filename, sizeof(state->sun.sun_path) - 1);
1511 if (connect(state->sockfd, (struct sockaddr *) &(state->sun), sizeof(struct sockaddr_un)) == 0) {
1512 vui->sock_errno = 0;
1513 vui->unix_fd = state->sockfd;
1514 state->template.file_descriptor = state->sockfd;
1515 vui->unix_file_index = unix_file_add (&unix_main, &(state->template));
1516 hash_set (dm->vu_sw_if_index_by_sock_fd, state->sockfd, xd->vlib_sw_if_index);
/* the connected fd is now owned by the interface; pre-create a fresh
 * spare socket for the next connection attempt */
1518 state->sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
1519 if (state->sockfd < 0)
/* connect failed: remember errno for "show vhost-user" diagnostics */
1522 vui->sock_errno = errno;
1525 /* check if socket is alive */
1527 socklen_t len = sizeof (error);
1528 int retval = getsockopt(vui->unix_fd, SOL_SOCKET, SO_ERROR, &error, &len);
/* a pending socket error means the peer went away — disconnect */
1531 dpdk_vhost_user_if_disconnect(xd);
/*
 * CLI handler for "create vhost-user": parses socket path and optional
 * server / feature-mask / renumber arguments, then creates the interface.
 */
1540 static clib_error_t *
1541 dpdk_vhost_user_connect_command_fn (vlib_main_t * vm,
1542 unformat_input_t * input,
1543 vlib_cli_command_t * cmd)
1545 dpdk_main_t * dm = &dpdk_main;
1546 unformat_input_t _line_input, * line_input = &_line_input;
1547 u8 * sock_filename = NULL;
1550 u64 feature_mask = (u64)~0;
1552 u32 custom_dev_instance = ~0;
1554 if (dm->use_virtio_vhost) {
1555 return vhost_user_connect_command_fn(vm, input, cmd);
1558 /* Get a line of input. */
1559 if (! unformat_user (input, unformat_line_input, line_input))
1562 while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
1563 if (unformat (line_input, "socket %s", &sock_filename))
1565 else if (unformat (line_input, "server"))
1567 else if (unformat (line_input, "feature-mask 0x%llx", &feature_mask))
1569 else if (unformat (line_input, "renumber %d", &custom_dev_instance)) {
1573 return clib_error_return (0, "unknown input `%U'",
1574 format_unformat_error, input);
1576 unformat_free (line_input);
1578 vnet_main_t *vnm = vnet_get_main();
1579 if (sock_filename == NULL)
1580 return clib_error_return (0, "missing socket file");
/* NOTE(review): return value of create_if is ignored here, so socket
 * setup failures are silent at the CLI — consider reporting rv */
1582 dpdk_vhost_user_create_if(vnm, vm, (char *)sock_filename,
1583 is_server, &sw_if_index, feature_mask,
1584 renumber, custom_dev_instance);
1586 vec_free(sock_filename);
/* CLI registration: "create vhost-user ..." (closing brace of the
 * initializer is outside this excerpt) */
1590 VLIB_CLI_COMMAND (dpdk_vhost_user_connect_command, static) = {
1591 .path = "create vhost-user",
1592 .short_help = "create vhost-user socket <socket-filename> [server] [feature-mask <hex>] [renumber <dev_instance>]",
1593 .function = dpdk_vhost_user_connect_command_fn,
/*
 * CLI handler for "delete vhost-user": parses sw_if_index and deletes
 * (deactivates) the corresponding interface.
 */
1596 static clib_error_t *
1597 dpdk_vhost_user_delete_command_fn (vlib_main_t * vm,
1598 unformat_input_t * input,
1599 vlib_cli_command_t * cmd)
1601 dpdk_main_t * dm = &dpdk_main;
1602 clib_error_t * error = 0;
1603 unformat_input_t _line_input, * line_input = &_line_input;
1604 u32 sw_if_index = ~0;
1606 if (dm->use_virtio_vhost) {
1607 return vhost_user_delete_command_fn(vm, input, cmd);
1610 /* Get a line of input. */
1611 if (! unformat_user (input, unformat_line_input, line_input))
1614 while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
1615 if (unformat (line_input, "sw_if_index %d", &sw_if_index))
1618 return clib_error_return (0, "unknown input `%U'",
1619 format_unformat_error, input);
1621 unformat_free (line_input);
1623 if (sw_if_index == ~0) {
/* NOTE(review): format string has no %U directive but extra args are
 * passed — the format_unformat_error/input pair here is dead weight */
1624 error = clib_error_return (0, "invalid sw_if_index",
1625 format_unformat_error, input);
1629 vnet_main_t *vnm = vnet_get_main();
1631 dpdk_vhost_user_delete_if(vnm, vm, sw_if_index);
/* CLI registration: "delete vhost-user sw_if_index <nn>" (closing brace
 * of the initializer is outside this excerpt) */
1636 VLIB_CLI_COMMAND (dpdk_vhost_user_delete_command, static) = {
1637 .path = "delete vhost-user",
1638 .short_help = "delete vhost-user sw_if_index <nn>",
1639 .function = dpdk_vhost_user_delete_command_fn,
/* X-macro list of virtio feature bits displayed by
 * "show vhost-user" (expanded with _() into feat_array below). */
1642 #define foreach_dpdk_vhost_feature \
1643 _ (VIRTIO_NET_F_MRG_RXBUF) \
1644 _ (VIRTIO_NET_F_CTRL_VQ) \
1645 _ (VIRTIO_NET_F_CTRL_RX)
/*
 * CLI handler for "show vhost-user": prints global coalescing settings,
 * then per-interface features, socket info, memory regions and vring
 * state; with "descriptors"/"desc" also dumps each descriptor table.
 * With no interface argument, all active vhost-user interfaces are shown.
 */
1648 static clib_error_t *
1649 show_dpdk_vhost_user_command_fn (vlib_main_t * vm,
1650 unformat_input_t * input,
1651 vlib_cli_command_t * cmd)
1652 clib_error_t * error = 0;
1653 dpdk_main_t * dm = &dpdk_main;
1654 vnet_main_t * vnm = vnet_get_main();
1656 dpdk_vu_intf_t * vui;
1657 struct virtio_net * vhost_dev;
1658 u32 hw_if_index, * hw_if_indices = 0;
1659 vnet_hw_interface_t * hi;
1662 struct virtio_memory * mem;
1663 struct feat_struct { u8 bit; char *str;};
1664 struct feat_struct *feat_entry;
/* build the {bit, name} table from the X-macro feature list */
1666 static struct feat_struct feat_array[] = {
1667 #define _(f) { .str = #f, .bit = f, },
1668 foreach_dpdk_vhost_feature
1673 if (dm->use_virtio_vhost) {
1674 return show_vhost_user_command_fn(vm, input, cmd);
/* parse optional interface names and the "descriptors" flag */
1677 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
1678 if (unformat (input, "%U", unformat_vnet_hw_interface, vnm, &hw_if_index)) {
1679 vec_add1 (hw_if_indices, hw_if_index);
1680 vlib_cli_output(vm, "add %d", hw_if_index);
1682 else if (unformat (input, "descriptors") || unformat (input, "desc") )
1685 error = clib_error_return (0, "unknown input `%U'",
1686 format_unformat_error, input);
/* no interface given: show every active vhost-user interface */
1690 if (vec_len (hw_if_indices) == 0) {
1691 vec_foreach (xd, dm->devices) {
1692 if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER && xd->vu_intf->active)
1693 vec_add1(hw_if_indices, xd->vlib_hw_if_index);
1697 vlib_cli_output (vm, "DPDK vhost-user interfaces");
1698 vlib_cli_output (vm, "Global:\n coalesce frames %d time %e\n\n",
1699 dm->vhost_coalesce_frames, dm->vhost_coalesce_time);
1701 for (i = 0; i < vec_len (hw_if_indices); i++) {
1702 hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
1704 if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_indices[i]))) {
1705 error = clib_error_return (0, "not dpdk vhost-user interface: '%s'",
1710 vhost_dev = &xd->vu_vhost_dev;
1711 mem = vhost_dev->mem;
1712 u32 virtio_net_hdr_sz = (vui->num_vrings > 0 ?
1713 vhost_dev->virtqueue[0]->vhost_hlen : 0);
1715 vlib_cli_output (vm, "Interface: %v (ifindex %d)",
1716 hi->name, hw_if_indices[i]);
1718 vlib_cli_output (vm, "virtio_net_hdr_sz %d\n features (0x%llx): \n",
1719 virtio_net_hdr_sz, xd->vu_vhost_dev.features);
/* list the names of the negotiated feature bits we know about */
1721 feat_entry = (struct feat_struct *) &feat_array;
1722 while(feat_entry->str) {
1723 if (xd->vu_vhost_dev.features & (1 << feat_entry->bit))
1724 vlib_cli_output (vm, " %s (%d)", feat_entry->str, feat_entry->bit);
1728 vlib_cli_output (vm, "\n");
1730 vlib_cli_output (vm, " socket filename %s type %s errno \"%s\"\n\n",
1731 vui->sock_filename, vui->sock_is_server ? "server" : "client",
1732 strerror(vui->sock_errno));
/* NOTE(review): mem is dereferenced without a NULL check; dump_ifs
 * guards mem != NULL — confirm a not-yet-connected interface cannot
 * reach this point, otherwise this can crash */
1734 vlib_cli_output (vm, " Memory regions (total %d)\n", mem->nregions);
1737 vlib_cli_output(vm, " region fd guest_phys_addr memory_size userspace_addr mmap_offset mmap_addr\n");
1738 vlib_cli_output(vm, " ====== ===== ================== ================== ================== ================== ==================\n");
1740 for (j = 0; j < mem->nregions; j++) {
1741 vlib_cli_output(vm, " %d %-5d 0x%016lx 0x%016lx 0x%016lx 0x%016lx 0x%016lx\n", j,
1743 mem->regions[j].guest_phys_address,
1744 mem->regions[j].memory_size,
1745 mem->regions[j].userspace_address,
1746 mem->regions[j].address_offset,
1747 vui->region_addr[j]);
/* even queue index = RX ring, odd = TX ring (vhost convention) */
1749 for (q = 0; q < vui->num_vrings; q++) {
1750 struct vhost_virtqueue *vq = vhost_dev->virtqueue[q];
1751 const char *qtype = (q & 1) ? "TX" : "RX";
1753 vlib_cli_output(vm, "\n Virtqueue %d (%s)\n", q/2, qtype);
1755 vlib_cli_output(vm, " qsz %d last_used_idx %d last_used_idx_res %d\n",
1756 vq->size, vq->last_used_idx, vq->last_used_idx_res);
1758 if (vq->avail && vq->used)
1759 vlib_cli_output(vm, " avail.flags %x avail.idx %d used.flags %x used.idx %d\n",
1760 vq->avail->flags, vq->avail->idx, vq->used->flags, vq->used->idx);
/* vq->enabled only exists on DPDK >= 2.2 */
1762 #if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
1763 vlib_cli_output(vm, " kickfd %d callfd %d errfd %d enabled %d\n",
1764 vq->kickfd, vq->callfd, vui->vrings[q].errfd, vq->enabled);
1766 if (show_descr && vq->enabled) {
1768 vlib_cli_output(vm, " kickfd %d callfd %d errfd\n",
1769 vq->kickfd, vq->callfd, vui->vrings[q].errfd);
/* optional full descriptor-table dump ("descriptors" flag) */
1773 vlib_cli_output(vm, "\n descriptor table:\n");
1774 vlib_cli_output(vm, " id addr len flags next user_addr\n");
1775 vlib_cli_output(vm, " ===== ================== ===== ====== ===== ==================\n");
1776 for(j = 0; j < vq->size; j++) {
1777 vlib_cli_output(vm, " %-5d 0x%016lx %-5d 0x%04x %-5d 0x%016lx\n",
1783 (u64) map_guest_mem(xd, vq->desc[j].addr));}
1786 vlib_cli_output (vm, "\n");
1789 vec_free (hw_if_indices);
1793 VLIB_CLI_COMMAND (show_vhost_user_command, static) = {
1794 .path = "show vhost-user",
1795 .short_help = "show vhost-user interface",
1796 .function = show_dpdk_vhost_user_command_fn,