2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <sys/socket.h>
21 #include <vlib/vlib.h>
22 #include <vlib/unix/unix.h>
24 #include <vnet/vnet.h>
25 #include <vppinfra/vec.h>
26 #include <vppinfra/error.h>
27 #include <vppinfra/format.h>
29 #include <vnet/ethernet/ethernet.h>
30 #include <vnet/devices/dpdk/dpdk.h>
32 #include <vnet/devices/virtio/vhost-user.h>
34 #define VHOST_USER_DEBUG_SOCKET 0
36 #if VHOST_USER_DEBUG_SOCKET == 1
37 #define DBG_SOCK(args...) clib_warning(args);
39 #define DBG_SOCK(args...)
/* Printable names for vhost-user protocol message ids, indexed by the
 * VHOST_USER_* enum via designated initializers.  Used only for DBG_SOCK
 * trace output, hence the (unused) attribute when debug is compiled out.
 * NOTE(review): the opening/closing braces of this initializer are not
 * visible in this (truncated) chunk. */
45 static const char *vhost_message_str[] __attribute__ ((unused)) =
47 [VHOST_USER_NONE] = "VHOST_USER_NONE",
48 [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
49 [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
50 [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
51 [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
52 [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
53 [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
54 [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
55 [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
56 [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
57 [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
58 [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
59 [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
60 [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
61 [VHOST_USER_SET_VRING_ERR] = "VHOST_USER_SET_VRING_ERR",
62 [VHOST_USER_GET_PROTOCOL_FEATURES] = "VHOST_USER_GET_PROTOCOL_FEATURES",
63 [VHOST_USER_SET_PROTOCOL_FEATURES] = "VHOST_USER_SET_PROTOCOL_FEATURES",
64 [VHOST_USER_GET_QUEUE_NUM] = "VHOST_USER_GET_QUEUE_NUM",
65 [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
69 static int dpdk_vhost_user_set_vring_enable (u32 hw_if_index,
73 * DPDK vhost-user functions
76 /* portions taken from dpdk
79 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
80 * All rights reserved.
82 * Redistribution and use in source and binary forms, with or without
83 * modification, are permitted provided that the following conditions
86 * * Redistributions of source code must retain the above copyright
87 * notice, this list of conditions and the following disclaimer.
88 * * Redistributions in binary form must reproduce the above copyright
89 * notice, this list of conditions and the following disclaimer in
90 * the documentation and/or other materials provided with the
92 * * Neither the name of Intel Corporation nor the names of its
93 * contributors may be used to endorse or promote products derived
94 * from this software without specific prior written permission.
96 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
97 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
98 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
99 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
100 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
101 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
102 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
103 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
104 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
105 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
106 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/* Translate a qemu (guest userspace) virtual address to a vhost-side
 * virtual address by finding the guest memory region containing it and
 * rebasing through guest_phys_address + address_offset.
 * NOTE(review): this chunk is truncated — the return-type line, braces,
 * vhost_va declaration and final return statement are not visible here. */
111 qva_to_vva (struct virtio_net *dev, uword qemu_va)
113 struct virtio_memory_regions *region;
115 uint32_t regionidx = 0;
117 /* Find the region where the address lives. */
118 for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++)
120 region = &dev->mem->regions[regionidx];
121 if ((qemu_va >= region->userspace_address) &&
122 (qemu_va <= region->userspace_address + region->memory_size))
/* Rebase: userspace VA -> guest PA -> host VA via per-region offset. */
124 vhost_va = qemu_va + region->guest_phys_address +
125 region->address_offset - region->userspace_address;
/* Resolve a vnet hardware interface index to its dpdk_device_t, but only
 * for vhost-user devices; the dev_type check below guards the lookup
 * (the return statements fall outside this truncated view). */
132 static dpdk_device_t *
133 dpdk_vhost_user_device_from_hw_if_index (u32 hw_if_index)
135 vnet_main_t *vnm = vnet_get_main ();
136 dpdk_main_t *dm = &dpdk_main;
137 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
138 dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);
/* Reject non-vhost-user devices (callers use NULL-check idiom elsewhere). */
140 if (xd->dev_type != VNET_DPDK_DEV_VHOST_USER)
/* Resolve a software interface index to its vhost-user dpdk_device_t by
 * mapping sw -> hw index and delegating to the hw-index lookup above.
 * Only hardware-type sw interfaces are valid here (ASSERT). */
146 static dpdk_device_t *
147 dpdk_vhost_user_device_from_sw_if_index (u32 sw_if_index)
149 vnet_main_t *vnm = vnet_get_main ();
150 vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);
151 ASSERT (sw->type == VNET_SW_INTERFACE_TYPE_HARDWARE);
153 return dpdk_vhost_user_device_from_hw_if_index (sw->hw_if_index);
/* Disable a single virtqueue (by index) on the given hw interface so the
 * datapath stops processing packets on it.  Used after vring state
 * changes that must quiesce the queue. */
157 stop_processing_packets (u32 hw_if_index, u8 idx)
159 dpdk_device_t *xd = dpdk_vhost_user_device_from_hw_if_index (hw_if_index);
161 xd->vu_vhost_dev.virtqueue[idx]->enabled = 0;
/* Disable every virtqueue of the device (virt_qp_nb queue pairs, RX+TX
 * per pair via VIRTIO_QNUM) and mark the whole interface not running. */
165 disable_interface (dpdk_device_t * xd)
168 int numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
169 for (idx = 0; idx < numqs; idx++)
170 xd->vu_vhost_dev.virtqueue[idx]->enabled = 0;
172 xd->vu_is_running = 0;
/* Translate a guest physical address into a host virtual address using
 * the mmap'd region table built by SET_MEM_TABLE (vui->region_addr[]).
 * Logs and (per the stripped lines, presumably) returns NULL when the
 * address falls in no region — TODO confirm against full source. */
176 map_guest_mem (dpdk_device_t * xd, uword addr)
178 dpdk_vu_intf_t *vui = xd->vu_intf;
179 struct virtio_memory *mem = xd->vu_vhost_dev.mem;
181 for (i = 0; i < mem->nregions; i++)
/* Half-open containment check: [guest_phys_address, +memory_size). */
183 if ((mem->regions[i].guest_phys_address <= addr) &&
184 ((mem->regions[i].guest_phys_address +
185 mem->regions[i].memory_size) > addr))
187 return (void *) ((uword) vui->region_addr[i] + addr -
188 (uword) mem->regions[i].guest_phys_address);
191 DBG_SOCK ("failed to map guest mem addr %lx", addr);
/* Create (or recycle) a vhost-user dpdk_device_t and register it as an
 * ethernet interface.  Reuses an entry from
 * dm->vu_inactive_interfaces_device_index when one exists, otherwise
 * allocates a fresh device, virtqueues and guest-memory bookkeeping.
 * Runs under the worker-thread barrier.  hwaddr==NULL presumably means
 * "generate a random MAC" (the branch lines are stripped — TODO confirm).
 * NOTE(review): this chunk is heavily truncated; braces, else-branches
 * and several declarations are not visible. */
195 static clib_error_t *
196 dpdk_create_vhost_user_if_internal (u32 * hw_if_index, u32 if_id, u8 * hwaddr)
198 dpdk_main_t *dm = &dpdk_main;
199 vlib_main_t *vm = vlib_get_main ();
200 vlib_thread_main_t *tm = vlib_get_thread_main ();
201 vnet_sw_interface_t *sw;
203 dpdk_device_and_queue_t *dq;
205 dpdk_vu_intf_t *vui = NULL;
/* One queue pair per worker when RSS is enabled, else a single pair. */
207 num_qpairs = dm->use_rss < 1 ? 1 : tm->n_vlib_mains;
209 dpdk_device_t *xd = NULL;
213 vlib_worker_thread_barrier_sync (vm);
215 int inactive_cnt = vec_len (dm->vu_inactive_interfaces_device_index);
216 // if there are any inactive ifaces
217 if (inactive_cnt > 0)
/* Pop the most recently retired device index and validate it. */
220 u32 vui_idx = dm->vu_inactive_interfaces_device_index[inactive_cnt - 1];
221 if (vec_len (dm->devices) > vui_idx)
223 xd = vec_elt_at_index (dm->devices, vui_idx);
224 if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER)
227 ("reusing inactive vhost-user interface sw_if_index %d",
228 xd->vlib_sw_if_index);
233 ("error: inactive vhost-user interface sw_if_index %d not VHOST_USER type!",
234 xd->vlib_sw_if_index);
235 // reset so new interface is created
239 // "remove" from inactive list
240 _vec_len (dm->vu_inactive_interfaces_device_index) -= 1;
245 // existing interface used - do not overwrite if_id if not needed
246 if (if_id != (u32) ~ 0)
247 xd->vu_if_id = if_id;
/* Recycled device: scrub per-virtqueue state and per-vring counters. */
251 for (j = 0; j < num_qpairs * VIRTIO_QNUM; j++)
253 memset (xd->vu_vhost_dev.virtqueue[j], 0,
254 sizeof (struct vhost_virtqueue));
255 xd->vu_vhost_dev.virtqueue[j]->kickfd = -1;
256 xd->vu_vhost_dev.virtqueue[j]->callfd = -1;
257 xd->vu_vhost_dev.virtqueue[j]->backend = -1;
258 vui->vrings[j].packets = 0;
259 vui->vrings[j].bytes = 0;
263 dpdk_device_lock_free (xd);
264 dpdk_device_lock_init (xd);
/* Re-arm per-thread TX vectors and per-queue RX vectors. */
267 for (j = 0; j < tm->n_vlib_mains; j++)
269 vec_validate_ha (xd->tx_vectors[j], DPDK_TX_RING_SIZE,
270 sizeof (tx_ring_hdr_t), CLIB_CACHE_LINE_BYTES);
271 vec_reset_length (xd->tx_vectors[j]);
275 for (j = 0; j < xd->rx_q_used; j++)
277 vec_validate_aligned (xd->rx_vectors[j], VLIB_FRAME_SIZE - 1,
278 CLIB_CACHE_LINE_BYTES);
279 vec_reset_length (xd->rx_vectors[j]);
284 // vui was not retrieved from inactive ifaces - create new
285 vec_add2_aligned (dm->devices, xd, 1, CLIB_CACHE_LINE_BYTES);
286 xd->dev_type = VNET_DPDK_DEV_VHOST_USER;
287 xd->rx_q_used = num_qpairs;
288 xd->tx_q_used = num_qpairs;
289 xd->vu_vhost_dev.virt_qp_nb = num_qpairs;
291 vec_validate_aligned (xd->rx_vectors, xd->rx_q_used,
292 CLIB_CACHE_LINE_BYTES);
/* ~0 means "auto-assign": take the next free vhost-user interface id. */
294 if (if_id == (u32) ~ 0)
295 xd->vu_if_id = dm->next_vu_if_id++;
297 xd->vu_if_id = if_id;
299 xd->device_index = xd - dm->devices;
300 xd->per_interface_next_index = ~0;
301 xd->vu_intf = clib_mem_alloc (sizeof (*(xd->vu_intf)));
/* Guest memory table sized for the protocol maximum region count. */
303 xd->vu_vhost_dev.mem = clib_mem_alloc (sizeof (struct virtio_memory) +
304 VHOST_MEMORY_MAX_NREGIONS *
306 virtio_memory_regions));
308 /* Will be set when guest sends VHOST_USER_SET_MEM_TABLE cmd */
309 xd->vu_vhost_dev.mem->nregions = 0;
312 * New virtqueue structure is an array of VHOST_MAX_QUEUE_PAIRS * 2
313 * We need to allocate numq pairs.
316 for (j = 0; j < num_qpairs * VIRTIO_QNUM; j++)
318 xd->vu_vhost_dev.virtqueue[j] =
319 clib_mem_alloc (sizeof (struct vhost_virtqueue));
320 memset (xd->vu_vhost_dev.virtqueue[j], 0,
321 sizeof (struct vhost_virtqueue));
322 xd->vu_vhost_dev.virtqueue[j]->kickfd = -1;
323 xd->vu_vhost_dev.virtqueue[j]->callfd = -1;
324 xd->vu_vhost_dev.virtqueue[j]->backend = -1;
325 vui->vrings[j].packets = 0;
326 vui->vrings[j].bytes = 0;
329 dpdk_device_lock_init (xd);
332 ("tm->n_vlib_mains: %d. TX %d, RX: %d, num_qpairs: %d, Lock: %p",
333 tm->n_vlib_mains, xd->tx_q_used, xd->rx_q_used, num_qpairs,
336 vec_validate_aligned (xd->tx_vectors, tm->n_vlib_mains,
337 CLIB_CACHE_LINE_BYTES);
339 for (j = 0; j < tm->n_vlib_mains; j++)
341 vec_validate_ha (xd->tx_vectors[j], DPDK_TX_RING_SIZE,
342 sizeof (tx_ring_hdr_t), CLIB_CACHE_LINE_BYTES);
343 vec_reset_length (xd->tx_vectors[j]);
347 for (j = 0; j < xd->rx_q_used; j++)
349 vec_validate_aligned (xd->rx_vectors[j], VLIB_FRAME_SIZE - 1,
350 CLIB_CACHE_LINE_BYTES);
351 vec_reset_length (xd->rx_vectors[j]);
356 * Generate random MAC address for the interface
360 clib_memcpy (addr, hwaddr, sizeof (addr));
/* Seed a PRNG from the current time and randomize the low 4 MAC bytes. */
364 f64 now = vlib_time_now (vm);
366 rnd = (u32) (now * 1e6);
367 rnd = random_u32 (&rnd);
369 clib_memcpy (addr + 2, &rnd, sizeof (rnd));
374 error = ethernet_register_interface
375 (dm->vnet_main, dpdk_device_class.index, xd->device_index,
376 /* ethernet address */ addr,
377 &xd->vlib_hw_if_index, 0);
382 sw = vnet_get_hw_sw_interface (dm->vnet_main, xd->vlib_hw_if_index);
383 xd->vlib_sw_if_index = sw->sw_if_index;
385 *hw_if_index = xd->vlib_hw_if_index;
387 DBG_SOCK ("xd->device_index: %d, dm->input_cpu_count: %d, "
388 "dm->input_cpu_first_index: %d\n", xd->device_index,
389 dm->input_cpu_count, dm->input_cpu_first_index);
/* Round-robin the queue pairs across the configured input CPUs and
 * record the NUMA socket of each queue's lcore. */
392 for (q = 0; q < num_qpairs; q++)
394 int cpu = dm->input_cpu_first_index + (next_cpu % dm->input_cpu_count);
396 unsigned lcore = vlib_worker_threads[cpu].dpdk_lcore_id;
397 vec_validate (xd->cpu_socket_id_by_queue, q);
398 xd->cpu_socket_id_by_queue[q] = rte_lcore_to_socket_id (lcore);
400 vec_add2 (dm->devices_by_cpu[cpu], dq, 1);
401 dq->device = xd->device_index;
403 DBG_SOCK ("CPU for %d = %d. QID: %d", *hw_if_index, cpu, dq->queue_id);
405 // start polling if it was not started yet (because of no phys ifaces)
406 if (tm->n_vlib_mains == 1
407 && dpdk_input_node.state != VLIB_NODE_STATE_POLLING)
408 vlib_node_set_state (vm, dpdk_input_node.index,
409 VLIB_NODE_STATE_POLLING);
411 if (tm->n_vlib_mains > 1)
412 vlib_node_set_state (vlib_mains[cpu], dpdk_input_node.index,
413 VLIB_NODE_STATE_POLLING);
417 vlib_worker_thread_barrier_release (vm);
421 #if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
/* Return the hugepage size backing the given fd (DPDK >= 16.04 only);
 * used below to align mmap/munmap lengths.  NOTE(review): the entire
 * function body is missing from this truncated chunk. */
423 get_huge_page_size (int fd)
/* Handle VHOST_USER_SET_PROTOCOL_FEATURES: stash the negotiated protocol
 * feature bits on the device (consulted later, e.g. for SET_LOG_BASE). */
431 static clib_error_t *
432 dpdk_vhost_user_set_protocol_features (u32 hw_if_index, u64 prot_features)
435 xd = dpdk_vhost_user_device_from_hw_if_index (hw_if_index);
437 xd->vu_vhost_dev.protocol_features = prot_features;
/* Handle VHOST_USER_GET_FEATURES: report DPDK's supported feature bits,
 * masking out L4 offload features on DPDK >= 16.04 (see comment below). */
441 static clib_error_t *
442 dpdk_vhost_user_get_features (u32 hw_if_index, u64 * features)
444 *features = rte_vhost_feature_get ();
446 #if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
447 #define OFFLOAD_FEATURES ((1ULL << VIRTIO_NET_F_HOST_TSO4) | \
448 (1ULL << VIRTIO_NET_F_HOST_TSO6) | \
449 (1ULL << VIRTIO_NET_F_CSUM) | \
450 (1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
451 (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
452 (1ULL << VIRTIO_NET_F_GUEST_TSO6))
454 /* These are not suppoted as bridging/tunneling VHOST
455 * interfaces with hardware interfaces/drivers that does
456 * not support offloading breaks L4 traffic.
458 *features &= (~OFFLOAD_FEATURES);
461 DBG_SOCK ("supported features: 0x%lx", *features);
/* Handle VHOST_USER_SET_FEATURES: record the guest-negotiated feature
 * bits, pick the virtio-net header length accordingly (larger header
 * when MRG_RXBUF is negotiated), and — when F_PROTOCOL_FEATURES was NOT
 * negotiated — enable all vrings up front per the vhost-user spec. */
465 static clib_error_t *
466 dpdk_vhost_user_set_features (u32 hw_if_index, u64 features)
469 u16 hdr_len = sizeof (struct virtio_net_hdr);
472 if (!(xd = dpdk_vhost_user_device_from_hw_if_index (hw_if_index)))
474 clib_warning ("not a vhost-user interface");
478 xd->vu_vhost_dev.features = features;
480 if (xd->vu_vhost_dev.features & (1 << VIRTIO_NET_F_MRG_RXBUF))
481 hdr_len = sizeof (struct virtio_net_hdr_mrg_rxbuf);
483 int numqs = VIRTIO_QNUM;
485 int prot_feature = features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES);
486 numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
487 for (idx = 0; idx < numqs; idx++)
489 xd->vu_vhost_dev.virtqueue[idx]->vhost_hlen = hdr_len;
491 * Spec says, if F_PROTOCOL_FEATURE is not set by the
492 * slave, then all the vrings should start off as
493 * enabled. If slave negotiates F_PROTOCOL_FEATURE, then
494 * slave is responsible to enable it.
/* NOTE(review): presumably guarded by !prot_feature on a stripped line. */
497 dpdk_vhost_user_set_vring_enable (hw_if_index, idx, 1);
/* Handle VHOST_USER_SET_MEM_TABLE: mmap each guest memory region via the
 * fds received over the control socket, record host-side addresses and
 * offsets for later GPA->HVA translation (map_guest_mem / qva_to_vva),
 * then disable the interface until vrings are (re)configured. */
503 static clib_error_t *
504 dpdk_vhost_user_set_mem_table (u32 hw_if_index, vhost_user_memory_t * vum,
507 struct virtio_memory *mem;
512 if (!(xd = dpdk_vhost_user_device_from_hw_if_index (hw_if_index)))
514 clib_warning ("not a vhost-user interface");
519 mem = xd->vu_vhost_dev.mem;
521 mem->nregions = vum->nregions;
523 for (i = 0; i < mem->nregions; i++)
525 u64 mapped_size, mapped_address;
527 mem->regions[i].guest_phys_address = vum->regions[i].guest_phys_addr;
528 mem->regions[i].guest_phys_address_end =
529 vum->regions[i].guest_phys_addr + vum->regions[i].memory_size;
530 mem->regions[i].memory_size = vum->regions[i].memory_size;
531 mem->regions[i].userspace_address = vum->regions[i].userspace_addr;
/* Map the whole fd including the region's offset into the file. */
533 mapped_size = mem->regions[i].memory_size + vum->regions[i].mmap_offset;
535 pointer_to_uword (mmap
536 (NULL, mapped_size, PROT_READ | PROT_WRITE,
537 MAP_SHARED, fd[i], 0));
539 if (uword_to_pointer (mapped_address, void *) == MAP_FAILED)
541 clib_warning ("mmap error");
/* Advance past mmap_offset so region_addr points at the region start. */
545 mapped_address += vum->regions[i].mmap_offset;
546 vui->region_addr[i] = mapped_address;
547 vui->region_fd[i] = fd[i];
548 vui->region_offset[i] = vum->regions[i].mmap_offset;
549 mem->regions[i].address_offset =
550 mapped_address - mem->regions[i].guest_phys_address;
552 DBG_SOCK ("map memory region %d addr 0x%lx off 0x%lx len 0x%lx",
553 i, vui->region_addr[i], vui->region_offset[i], mapped_size);
555 if (vum->regions[i].guest_phys_addr == 0)
557 mem->base_address = vum->regions[i].userspace_addr;
558 mem->mapped_address = mem->regions[i].address_offset;
562 disable_interface (xd);
/* Handle VHOST_USER_SET_VRING_NUM: record the ring size for vring `idx`
 * (the vq->size assignment line is stripped from this view) and quiesce
 * the queue until the guest finishes reconfiguring it. */
566 static clib_error_t *
567 dpdk_vhost_user_set_vring_num (u32 hw_if_index, u8 idx, u32 num)
570 struct vhost_virtqueue *vq;
572 DBG_SOCK ("idx %u num %u", idx, num);
574 if (!(xd = dpdk_vhost_user_device_from_hw_if_index (hw_if_index)))
576 clib_warning ("not a vhost-user interface");
579 vq = xd->vu_vhost_dev.virtqueue[idx];
582 stop_processing_packets (hw_if_index, idx);
/* Handle VHOST_USER_SET_VRING_ADDR: translate the guest-supplied desc/
 * used/avail ring addresses to host virtual addresses, resync the local
 * used-index shadow copies, turn off guest notifications (kicks) for
 * performance, and quiesce the queue. */
587 static clib_error_t *
588 dpdk_vhost_user_set_vring_addr (u32 hw_if_index, u8 idx, uword desc,
589 uword used, uword avail, uword log)
592 struct vhost_virtqueue *vq;
594 DBG_SOCK ("idx %u desc 0x%lx used 0x%lx avail 0x%lx log 0x%lx",
595 idx, desc, used, avail, log);
597 if (!(xd = dpdk_vhost_user_device_from_hw_if_index (hw_if_index)))
599 clib_warning ("not a vhost-user interface");
602 vq = xd->vu_vhost_dev.virtqueue[idx];
604 vq->desc = (struct vring_desc *) qva_to_vva (&xd->vu_vhost_dev, desc);
605 vq->used = (struct vring_used *) qva_to_vva (&xd->vu_vhost_dev, used);
606 vq->avail = (struct vring_avail *) qva_to_vva (&xd->vu_vhost_dev, avail);
607 #if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
608 vq->log_guest_addr = log;
/* Any failed translation leaves a NULL ring pointer -> bail out. */
611 if (!(vq->desc && vq->used && vq->avail))
613 clib_warning ("falied to set vring addr");
616 if (vq->last_used_idx != vq->used->idx)
618 clib_warning ("last_used_idx (%u) and vq->used->idx (%u) mismatches; "
619 "some packets maybe resent for Tx and dropped for Rx",
620 vq->last_used_idx, vq->used->idx);
621 vq->last_used_idx = vq->used->idx;
622 vq->last_used_idx_res = vq->used->idx;
626 * Inform the guest that there is no need to inform (kick) the
627 * host when it adds buffers. kick results in vmexit and will
628 * incur performance degradation.
630 * The below function sets a flag in used table. Therefore,
631 * should be initialized after initializing vq->used.
633 rte_vhost_enable_guest_notification (&xd->vu_vhost_dev, idx, 0);
634 stop_processing_packets (hw_if_index, idx);
/* Handle VHOST_USER_GET_VRING_BASE: report the vring's last_used_idx to
 * the guest, stop the ring (per the vhost-user spec this message stops
 * the ring), reset the local vring copy/fd, and disable the device when
 * every virtqueue has become disabled. */
639 static clib_error_t *
640 dpdk_vhost_user_get_vring_base (u32 hw_if_index, u8 idx, u32 * num)
643 struct vhost_virtqueue *vq;
645 if (!(xd = dpdk_vhost_user_device_from_hw_if_index (hw_if_index)))
647 clib_warning ("not a vhost-user interface");
651 vq = xd->vu_vhost_dev.virtqueue[idx];
652 *num = vq->last_used_idx;
656 * Client must start ring upon receiving a kick
657 * (that is, detecting that file descriptor is readable)
658 * on the descriptor specified by VHOST_USER_SET_VRING_KICK,
659 * and stop ring upon receiving VHOST_USER_GET_VRING_BASE.
661 DBG_SOCK ("Stopping vring Q %u of device %d", idx, hw_if_index);
662 dpdk_vu_intf_t *vui = xd->vu_intf;
663 vui->vrings[idx].enabled = 0; /* Reset local copy */
664 vui->vrings[idx].callfd = -1; /* Reset FD */
669 #if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
670 vq->log_guest_addr = 0;
673 /* Check if all Qs are disabled */
/* NOTE(review): `idx` (u8 function parameter) is reused as the scan
 * index here — intentional in the original, but worth confirming. */
674 int numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
675 for (idx = 0; idx < numqs; idx++)
677 if (xd->vu_vhost_dev.virtqueue[idx]->enabled)
681 /* If all vrings are disabed then disable device */
684 DBG_SOCK ("Device %d disabled", hw_if_index);
685 xd->vu_is_running = 0;
/* Handle VHOST_USER_SET_VRING_BASE: seed the vring's shadow used-index
 * (and its reservation copy) with the guest-provided value, then quiesce
 * the queue until it is re-enabled. */
691 static clib_error_t *
692 dpdk_vhost_user_set_vring_base (u32 hw_if_index, u8 idx, u32 num)
695 struct vhost_virtqueue *vq;
697 DBG_SOCK ("idx %u num %u", idx, num);
699 if (!(xd = dpdk_vhost_user_device_from_hw_if_index (hw_if_index)))
701 clib_warning ("not a vhost-user interface");
705 vq = xd->vu_vhost_dev.virtqueue[idx];
706 vq->last_used_idx = num;
707 vq->last_used_idx_res = num;
709 stop_processing_packets (hw_if_index, idx);
/* Handle VHOST_USER_SET_VRING_KICK: mark vring `idx` enabled when its
 * ring pointers and the locally-saved enable flag are all set, then
 * recompute xd->vu_is_running (needs at least one fully-enabled RX/TX
 * queue pair) and bring the link up if the interface is admin-up. */
714 static clib_error_t *
715 dpdk_vhost_user_set_vring_kick (u32 hw_if_index, u8 idx, int fd)
717 dpdk_main_t *dm = &dpdk_main;
719 dpdk_vu_vring *vring;
720 struct vhost_virtqueue *vq0, *vq1, *vq;
721 int index, vu_is_running = 0;
723 if (!(xd = dpdk_vhost_user_device_from_hw_if_index (hw_if_index)))
725 clib_warning ("not a vhost-user interface");
729 vq = xd->vu_vhost_dev.virtqueue[idx];
732 vring = &xd->vu_intf->vrings[idx];
733 vq->enabled = (vq->desc && vq->avail && vq->used && vring->enabled) ? 1 : 0;
736 * Set xd->vu_is_running if at least one pair of
737 * RX/TX queues are enabled.
739 int numqs = VIRTIO_QNUM;
740 numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
/* Queues are laid out as consecutive RX/TX pairs, hence the += 2. */
742 for (index = 0; index < numqs; index += 2)
744 vq0 = xd->vu_vhost_dev.virtqueue[index]; /* RX */
745 vq1 = xd->vu_vhost_dev.virtqueue[index + 1]; /* TX */
746 if (vq0->enabled && vq1->enabled)
752 DBG_SOCK ("SET_VRING_KICK - idx %d, running %d, fd: %d",
753 idx, vu_is_running, fd);
755 xd->vu_is_running = vu_is_running;
756 if (xd->vu_is_running && xd->admin_up)
758 vnet_hw_interface_set_flags (dm->vnet_main,
759 xd->vlib_hw_if_index,
760 VNET_HW_INTERFACE_FLAG_LINK_UP |
761 ETH_LINK_FULL_DUPLEX);
/* Handle VHOST_USER_SET_VRING_ENABLE: save the enable flag in the local
 * vring copy first (guests may enable a queue before setting its ring
 * addresses), and only reflect it into the vhost vq once desc/avail/used
 * are all populated. */
768 dpdk_vhost_user_set_vring_enable (u32 hw_if_index, u8 idx, int enable)
771 struct vhost_virtqueue *vq;
774 if (!(xd = dpdk_vhost_user_device_from_hw_if_index (hw_if_index)))
776 clib_warning ("not a vhost-user interface");
782 * Guest vhost driver wrongly enables queue before
783 * setting the vring address. Therefore, save a
784 * local copy. Reflect it in vq structure if addresses
785 * are set. If not, vq will be enabled when vring
788 vui->vrings[idx].enabled = enable; /* Save local copy */
/* NOTE(review): the countdown loop around this check is stripped from
 * this view — `numqs` appears to be decremented elsewhere; verify the
 * vrings[numqs] index against the full source. */
790 int numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
793 if (!vui->vrings[numqs].enabled)
797 if (numqs == -1) /* All Qs are enabled */
802 vq = xd->vu_vhost_dev.virtqueue[idx];
803 if (vq->desc && vq->avail && vq->used)
804 xd->vu_vhost_dev.virtqueue[idx]->enabled = enable;
/* unix_file_t read callback for a vring call eventfd: drain the 8-byte
 * eventfd counter so the fd stops polling readable; value is discarded. */
809 static clib_error_t *
810 dpdk_vhost_user_callfd_read_ready (unix_file_t * uf)
812 __attribute__ ((unused)) int n;
814 n = read (uf->file_descriptor, ((char *) &buff), 8);
/* Handle VHOST_USER_SET_VRING_CALL: replace any previously registered
 * call eventfd for vring `idx` with the new one, register it with the
 * unix file poller (drained by dpdk_vhost_user_callfd_read_ready), and
 * keep the fd in the local vring copy rather than in vq->callfd. */
818 static clib_error_t *
819 dpdk_vhost_user_set_vring_call (u32 hw_if_index, u8 idx, int fd)
822 struct vhost_virtqueue *vq;
823 unix_file_t template = { 0 };
825 DBG_SOCK ("SET_VRING_CALL - idx %d, fd %d", idx, fd);
827 if (!(xd = dpdk_vhost_user_device_from_hw_if_index (hw_if_index)))
829 clib_warning ("not a vhost-user interface");
833 dpdk_vu_intf_t *vui = xd->vu_intf;
835 /* if there is old fd, delete it */
836 if (vui->vrings[idx].callfd > 0)
838 unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
839 vui->vrings[idx].callfd_idx);
840 unix_file_del (&unix_main, uf);
842 vui->vrings[idx].callfd = fd;
843 template.read_function = dpdk_vhost_user_callfd_read_ready;
844 template.file_descriptor = fd;
845 vui->vrings[idx].callfd_idx = unix_file_add (&unix_main, &template);
847 vq = xd->vu_vhost_dev.virtqueue[idx];
848 vq->callfd = -1; /* We use locally saved vring->callfd; */
/* Return non-zero when the guest wants an interrupt for vring `idx`:
 * a call eventfd is registered AND the guest has not set
 * VRING_AVAIL_F_NO_INTERRUPT in the avail ring flags. */
854 dpdk_vhost_user_want_interrupt (dpdk_device_t * xd, int idx)
856 dpdk_vu_intf_t *vui = xd->vu_intf;
857 ASSERT (vui != NULL);
859 if (PREDICT_FALSE (vui->num_vrings <= 0))
862 dpdk_vu_vring *vring = &(vui->vrings[idx]);
863 struct vhost_virtqueue *vq = xd->vu_vhost_dev.virtqueue[idx];
865 /* return if vm is interested in interrupts */
866 return (vring->callfd > 0)
867 && !(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
/* Signal the guest on vring `idx` by writing the call eventfd, but only
 * if the guest is interested (same condition as want_interrupt).  Resets
 * the coalescing counter and arms the next interrupt deadline using the
 * configured vhost_coalesce_time. */
871 dpdk_vhost_user_send_interrupt (vlib_main_t * vm, dpdk_device_t * xd, int idx)
873 dpdk_main_t *dm = &dpdk_main;
874 dpdk_vu_intf_t *vui = xd->vu_intf;
875 ASSERT (vui != NULL);
877 if (PREDICT_FALSE (vui->num_vrings <= 0))
880 dpdk_vu_vring *vring = &(vui->vrings[idx]);
881 struct vhost_virtqueue *vq = xd->vu_vhost_dev.virtqueue[idx];
883 /* if vm is interested in interrupts */
884 if ((vring->callfd > 0) && !(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
886 eventfd_write (vring->callfd, (eventfd_t) 1);
887 vring->n_since_last_int = 0;
888 vring->int_deadline =
889 vlib_time_now (vm) + dm->conf->vhost_coalesce_time;
894 * vhost-user interface management functions
897 // initialize vui with specified attributes
/* Zero and populate the per-interface vhost-user state (socket fd,
 * vring count, server/client mode, socket path, feature mask), clear
 * interface flags, and return the sw_if_index to the caller if asked. */
899 dpdk_vhost_user_vui_init (vnet_main_t * vnm,
900 dpdk_device_t * xd, int sockfd,
901 const char *sock_filename,
902 u8 is_server, u64 feature_mask, u32 * sw_if_index)
904 dpdk_vu_intf_t *vui = xd->vu_intf;
905 memset (vui, 0, sizeof (*vui));
907 vui->unix_fd = sockfd;
908 vui->num_vrings = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
909 DBG_SOCK ("dpdk_vhost_user_vui_init VRINGS: %d", vui->num_vrings);
910 vui->sock_is_server = is_server;
/* strncpy bounded to len-1; termination relies on the preceding memset. */
911 strncpy (vui->sock_filename, sock_filename,
912 ARRAY_LEN (vui->sock_filename) - 1);
915 vui->feature_mask = feature_mask;
917 vui->unix_file_index = ~0;
919 vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);
/* sw_if_index may be NULL — presumably guarded on a stripped line. */
922 *sw_if_index = xd->vlib_sw_if_index;
925 // register vui and start polling on it
/* Record the listener-fd -> sw_if_index mapping so incoming socket
 * events can be routed back to this interface. */
927 dpdk_vhost_user_vui_register (vlib_main_t * vm, dpdk_device_t * xd)
929 dpdk_main_t *dm = &dpdk_main;
930 dpdk_vu_intf_t *vui = xd->vu_intf;
932 hash_set (dm->vu_sw_if_index_by_listener_fd, vui->unix_fd,
933 xd->vlib_sw_if_index);
/* Tear down every mapped guest memory region: munmap the full mapping
 * (including the mmap_offset prefix, length rounded up to the backing
 * hugepage size), invalidate region_addr, and close the region fd. */
937 dpdk_unmap_all_mem_regions (dpdk_device_t * xd)
940 dpdk_vu_intf_t *vui = xd->vu_intf;
941 struct virtio_memory *mem = xd->vu_vhost_dev.mem;
943 for (i = 0; i < mem->nregions; i++)
/* -1 marks an already-unmapped slot. */
945 if (vui->region_addr[i] != -1)
948 long page_sz = get_huge_page_size (vui->region_fd[i]);
950 ssize_t map_sz = RTE_ALIGN_CEIL (mem->regions[i].memory_size +
951 vui->region_offset[i], page_sz);
/* region_addr points past mmap_offset; rewind to the real map base. */
954 munmap ((void *) (vui->region_addr[i] - vui->region_offset[i]),
958 ("unmap memory region %d addr 0x%lx off 0x%lx len 0x%lx page_sz 0x%x",
959 i, vui->region_addr[i], vui->region_offset[i], map_sz, page_sz);
961 vui->region_addr[i] = -1;
965 clib_unix_warning ("failed to unmap memory region");
967 close (vui->region_fd[i]);
/* Disconnect a vhost-user session: bring the link down, deregister and
 * close the control socket, reset every vring's local state and log
 * address, mark the device not running, and unmap guest memory.  The
 * device itself is kept for later reuse (see create's inactive list). */
974 dpdk_vhost_user_if_disconnect (dpdk_device_t * xd)
976 dpdk_vu_intf_t *vui = xd->vu_intf;
977 vnet_main_t *vnm = vnet_get_main ();
978 dpdk_main_t *dm = &dpdk_main;
979 struct vhost_virtqueue *vq;
983 vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);
985 if (vui->unix_file_index != ~0)
987 unix_file_del (&unix_main, unix_main.file_pool + vui->unix_file_index);
988 vui->unix_file_index = ~0;
/* Remove the fd from both lookup tables before closing it. */
991 hash_unset (dm->vu_sw_if_index_by_sock_fd, vui->unix_fd);
992 hash_unset (dm->vu_sw_if_index_by_listener_fd, vui->unix_fd);
993 close (vui->unix_fd);
997 for (q = 0; q < vui->num_vrings; q++)
999 vq = xd->vu_vhost_dev.virtqueue[q];
1000 vui->vrings[q].enabled = 0; /* Reset local copy */
1001 vui->vrings[q].callfd = -1; /* Reset FD */
1003 #if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
1004 vq->log_guest_addr = 0;
1010 xd->vu_is_running = 0;
1012 dpdk_unmap_all_mem_regions (xd);
1013 DBG_SOCK ("interface ifindex %d disconnected", xd->vlib_sw_if_index);
1016 static clib_error_t *
1017 dpdk_vhost_user_socket_read (unix_file_t * uf)
1020 int fd, number_of_fds = 0;
1021 int fds[VHOST_MEMORY_MAX_NREGIONS];
1022 vhost_user_msg_t msg;
1024 struct iovec iov[1];
1025 dpdk_main_t *dm = &dpdk_main;
1027 dpdk_vu_intf_t *vui;
1028 struct cmsghdr *cmsg;
1031 vnet_main_t *vnm = vnet_get_main ();
1033 p = hash_get (dm->vu_sw_if_index_by_sock_fd, uf->file_descriptor);
1036 DBG_SOCK ("FD %d doesn't belong to any interface", uf->file_descriptor);
1040 xd = dpdk_vhost_user_device_from_sw_if_index (p[0]);
1042 ASSERT (xd != NULL);
1045 char control[CMSG_SPACE (VHOST_MEMORY_MAX_NREGIONS * sizeof (int))];
1047 memset (&mh, 0, sizeof (mh));
1048 memset (control, 0, sizeof (control));
1050 /* set the payload */
1051 iov[0].iov_base = (void *) &msg;
1052 iov[0].iov_len = VHOST_USER_MSG_HDR_SZ;
1056 mh.msg_control = control;
1057 mh.msg_controllen = sizeof (control);
1059 n = recvmsg (uf->file_descriptor, &mh, 0);
1061 if (n != VHOST_USER_MSG_HDR_SZ)
1064 if (mh.msg_flags & MSG_CTRUNC)
1069 cmsg = CMSG_FIRSTHDR (&mh);
1071 if (cmsg && (cmsg->cmsg_len > 0) && (cmsg->cmsg_level == SOL_SOCKET) &&
1072 (cmsg->cmsg_type == SCM_RIGHTS) &&
1073 (cmsg->cmsg_len - CMSG_LEN (0) <=
1074 VHOST_MEMORY_MAX_NREGIONS * sizeof (int)))
1076 number_of_fds = (cmsg->cmsg_len - CMSG_LEN (0)) / sizeof (int);
1077 clib_memcpy (fds, CMSG_DATA (cmsg), number_of_fds * sizeof (int));
1080 /* version 1, no reply bit set */
1081 if ((msg.flags & 7) != 1)
1083 DBG_SOCK ("malformed message received. closing socket");
1088 int rv __attribute__ ((unused));
1089 /* $$$$ pay attention to rv */
1090 rv = read (uf->file_descriptor, ((char *) &msg) + n, msg.size);
1093 DBG_SOCK ("VPP VHOST message %s", vhost_message_str[msg.request]);
1094 switch (msg.request)
1096 case VHOST_USER_GET_FEATURES:
1097 DBG_SOCK ("if %d msg VHOST_USER_GET_FEATURES", xd->vlib_hw_if_index);
1099 msg.flags |= VHOST_USER_REPLY_MASK;
1101 dpdk_vhost_user_get_features (xd->vlib_hw_if_index, &msg.u64);
1102 msg.u64 &= vui->feature_mask;
1103 msg.size = sizeof (msg.u64);
1106 case VHOST_USER_SET_FEATURES:
1107 DBG_SOCK ("if %d msg VHOST_USER_SET_FEATURES features 0x%016lx",
1108 xd->vlib_hw_if_index, msg.u64);
1110 dpdk_vhost_user_set_features (xd->vlib_hw_if_index, msg.u64);
1113 case VHOST_USER_SET_MEM_TABLE:
1114 DBG_SOCK ("if %d msg VHOST_USER_SET_MEM_TABLE nregions %d",
1115 xd->vlib_hw_if_index, msg.memory.nregions);
1117 if ((msg.memory.nregions < 1) ||
1118 (msg.memory.nregions > VHOST_MEMORY_MAX_NREGIONS))
1121 DBG_SOCK ("number of mem regions must be between 1 and %i",
1122 VHOST_MEMORY_MAX_NREGIONS);
1127 if (msg.memory.nregions != number_of_fds)
1129 DBG_SOCK ("each memory region must have FD");
1133 /* Unmap previously configured memory if necessary */
1134 dpdk_unmap_all_mem_regions (xd);
1136 dpdk_vhost_user_set_mem_table (xd->vlib_hw_if_index, &msg.memory, fds);
1139 case VHOST_USER_SET_VRING_NUM:
1140 DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_NUM idx %d num %d",
1141 xd->vlib_hw_if_index, msg.state.index, msg.state.num);
1143 if ((msg.state.num > 32768) || /* maximum ring size is 32768 */
1144 (msg.state.num == 0) || /* it cannot be zero */
1145 (msg.state.num % 2)) /* must be power of 2 */
1148 dpdk_vhost_user_set_vring_num (xd->vlib_hw_if_index, msg.state.index,
1152 case VHOST_USER_SET_VRING_ADDR:
1153 DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_ADDR idx %d",
1154 xd->vlib_hw_if_index, msg.state.index);
1156 dpdk_vhost_user_set_vring_addr (xd->vlib_hw_if_index, msg.state.index,
1157 msg.addr.desc_user_addr,
1158 msg.addr.used_user_addr,
1159 msg.addr.avail_user_addr,
1160 msg.addr.log_guest_addr);
1163 case VHOST_USER_SET_OWNER:
1164 DBG_SOCK ("if %d msg VHOST_USER_SET_OWNER", xd->vlib_hw_if_index);
1167 case VHOST_USER_RESET_OWNER:
1168 DBG_SOCK ("if %d msg VHOST_USER_RESET_OWNER", xd->vlib_hw_if_index);
1171 case VHOST_USER_SET_VRING_CALL:
1172 q = (u8) (msg.u64 & 0xFF);
1174 DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_CALL u64 %lx, idx: %d",
1175 xd->vlib_hw_if_index, msg.u64, q);
1177 if (!(msg.u64 & 0x100))
1179 if (number_of_fds != 1)
1187 dpdk_vhost_user_set_vring_call (xd->vlib_hw_if_index, q, fd);
1191 case VHOST_USER_SET_VRING_KICK:
1193 q = (u8) (msg.u64 & 0xFF);
1195 DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_KICK u64 %lx, idx: %d",
1196 xd->vlib_hw_if_index, msg.u64, q);
1198 if (!(msg.u64 & 0x100))
1200 if (number_of_fds != 1)
1203 vui->vrings[q].kickfd = fds[0];
1206 vui->vrings[q].kickfd = -1;
1208 dpdk_vhost_user_set_vring_kick (xd->vlib_hw_if_index, q,
1209 vui->vrings[q].kickfd);
1212 case VHOST_USER_SET_VRING_ERR:
1214 q = (u8) (msg.u64 & 0xFF);
1216 DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_ERR u64 %lx, idx: %d",
1217 xd->vlib_hw_if_index, msg.u64, q);
1219 if (!(msg.u64 & 0x100))
1221 if (number_of_fds != 1)
1229 vui->vrings[q].errfd = fd;
1232 case VHOST_USER_SET_VRING_BASE:
1233 DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_BASE idx %d num %d",
1234 xd->vlib_hw_if_index, msg.state.index, msg.state.num);
1236 dpdk_vhost_user_set_vring_base (xd->vlib_hw_if_index, msg.state.index,
1240 case VHOST_USER_GET_VRING_BASE:
1241 DBG_SOCK ("if %d msg VHOST_USER_GET_VRING_BASE idx %d num %d",
1242 xd->vlib_hw_if_index, msg.state.index, msg.state.num);
1244 msg.flags |= VHOST_USER_REPLY_MASK;
1245 msg.size = sizeof (msg.state);
1247 dpdk_vhost_user_get_vring_base (xd->vlib_hw_if_index, msg.state.index,
1251 case VHOST_USER_NONE:
1252 DBG_SOCK ("if %d msg VHOST_USER_NONE", xd->vlib_hw_if_index);
1255 case VHOST_USER_SET_LOG_BASE:
1256 #if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
1257 DBG_SOCK ("if %d msg VHOST_USER_SET_LOG_BASE", xd->vlib_hw_if_index);
1259 if (msg.size != sizeof (msg.log))
1262 ("invalid msg size for VHOST_USER_SET_LOG_BASE: %u instead of %lu",
1263 msg.size, sizeof (msg.log));
1268 (xd->vu_vhost_dev.protocol_features & (1 <<
1269 VHOST_USER_PROTOCOL_F_LOG_SHMFD)))
1272 ("VHOST_USER_PROTOCOL_F_LOG_SHMFD not set but VHOST_USER_SET_LOG_BASE received");
1277 /* align size to 2M page */
1278 long page_sz = get_huge_page_size (fd);
1280 RTE_ALIGN_CEIL (msg.log.size + msg.log.offset, page_sz);
1282 void *addr = mmap (0, map_sz, PROT_READ | PROT_WRITE,
1285 DBG_SOCK ("map log region addr 0 len 0x%lx off 0x%lx fd %d mapped %p",
1286 map_sz, msg.log.offset, fd, addr);
1288 if (addr == MAP_FAILED)
1290 clib_warning ("failed to map memory. errno is %d", errno);
1294 xd->vu_vhost_dev.log_base += pointer_to_uword (addr) + msg.log.offset;
1295 xd->vu_vhost_dev.log_size = msg.log.size;
1296 msg.flags |= VHOST_USER_REPLY_MASK;
1297 msg.size = sizeof (msg.u64);
1299 DBG_SOCK ("if %d msg VHOST_USER_SET_LOG_BASE Not-Implemented",
1300 xd->vlib_hw_if_index);
1304 case VHOST_USER_SET_LOG_FD:
1305 DBG_SOCK ("if %d msg VHOST_USER_SET_LOG_FD", xd->vlib_hw_if_index);
1308 case VHOST_USER_GET_PROTOCOL_FEATURES:
1309 DBG_SOCK ("if %d msg VHOST_USER_GET_PROTOCOL_FEATURES",
1310 xd->vlib_hw_if_index);
1312 msg.flags |= VHOST_USER_REPLY_MASK;
1313 msg.u64 = VHOST_USER_PROTOCOL_FEATURES;
1314 DBG_SOCK ("VHOST_USER_PROTOCOL_FEATURES: %llx",
1315 VHOST_USER_PROTOCOL_FEATURES);
1316 msg.size = sizeof (msg.u64);
1319 case VHOST_USER_SET_PROTOCOL_FEATURES:
1320 DBG_SOCK ("if %d msg VHOST_USER_SET_PROTOCOL_FEATURES",
1321 xd->vlib_hw_if_index);
1323 DBG_SOCK ("VHOST_USER_SET_PROTOCOL_FEATURES: 0x%lx", msg.u64);
1324 dpdk_vhost_user_set_protocol_features (xd->vlib_hw_if_index, msg.u64);
1327 case VHOST_USER_SET_VRING_ENABLE:
1328 DBG_SOCK ("%d VPP VHOST_USER_SET_VRING_ENABLE IDX: %d, Enable: %d",
1329 xd->vlib_hw_if_index, msg.state.index, msg.state.num);
1330 dpdk_vhost_user_set_vring_enable
1331 (xd->vlib_hw_if_index, msg.state.index, msg.state.num);
1334 case VHOST_USER_GET_QUEUE_NUM:
1335 DBG_SOCK ("if %d msg VHOST_USER_GET_QUEUE_NUM:", xd->vlib_hw_if_index);
1337 msg.flags |= VHOST_USER_REPLY_MASK;
1338 msg.u64 = xd->vu_vhost_dev.virt_qp_nb;
1339 msg.size = sizeof (msg.u64);
1343 DBG_SOCK ("unknown vhost-user message %d received. closing socket",
1348 /* if we have pointers to descriptor table, go up */
1350 xd->vu_vhost_dev.virtqueue[VHOST_NET_VRING_IDX_TX]->desc &&
1351 xd->vu_vhost_dev.virtqueue[VHOST_NET_VRING_IDX_RX]->desc)
1354 DBG_SOCK ("interface %d connected", xd->vlib_sw_if_index);
1356 vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index,
1357 VNET_HW_INTERFACE_FLAG_LINK_UP);
1362 /* if we need to reply */
1363 if (msg.flags & VHOST_USER_REPLY_MASK)
1366 send (uf->file_descriptor, &msg, VHOST_USER_MSG_HDR_SZ + msg.size, 0);
1367 if (n != (msg.size + VHOST_USER_MSG_HDR_SZ))
1374 DBG_SOCK ("error: close_socket");
1375 dpdk_vhost_user_if_disconnect (xd);
/*
 * unix_file_t error callback for an established vhost-user session socket.
 * Resolves the failing fd to its owning interface and disconnects it.
 */
1379 static clib_error_t *
1380 dpdk_vhost_user_socket_error (unix_file_t * uf)
1382 dpdk_main_t *dm = &dpdk_main;
/* session fd -> sw_if_index lookup for connected vhost-user sockets */
1386 p = hash_get (dm->vu_sw_if_index_by_sock_fd, uf->file_descriptor);
/* unknown fd: debug trace only (presumably an early-return path; the
 * surrounding lines are elided in this excerpt -- TODO confirm) */
1389 DBG_SOCK ("FD %d doesn't belong to any interface", uf->file_descriptor);
1393 xd = dpdk_vhost_user_device_from_sw_if_index (p[0]);
/* tear down this session's socket state */
1395 dpdk_vhost_user_if_disconnect (xd);
/*
 * Read-ready callback on a vhost-user *listening* (server-mode) socket:
 * accept the incoming client connection and attach it to the interface
 * that owns the listener.
 */
1399 static clib_error_t *
1400 dpdk_vhost_user_socksvr_accept_ready (unix_file_t * uf)
1402 int client_fd, client_len;
1403 struct sockaddr_un client;
1404 unix_file_t template = { 0 };
1405 dpdk_main_t *dm = &dpdk_main;
1406 dpdk_device_t *xd = NULL;
1407 dpdk_vu_intf_t *vui;
/* listener fd -> sw_if_index of the interface that owns this server socket */
1410 p = hash_get (dm->vu_sw_if_index_by_listener_fd, uf->file_descriptor);
1413 DBG_SOCK ("fd %d doesn't belong to any interface", uf->file_descriptor);
1417 xd = dpdk_vhost_user_device_from_sw_if_index (p[0]);
1418 ASSERT (xd != NULL);
1421 client_len = sizeof (client);
1422 client_fd = accept (uf->file_descriptor,
1423 (struct sockaddr *) &client,
1424 (socklen_t *) & client_len);
1427 return clib_error_return_unix (0, "accept");
/* register the accepted fd with the unix event loop, using the same
 * read/error handlers as client-mode connections */
1429 template.read_function = dpdk_vhost_user_socket_read;
1430 template.error_function = dpdk_vhost_user_socket_error;
1431 template.file_descriptor = client_fd;
1432 vui->unix_file_index = unix_file_add (&unix_main, &template);
/* remember the session fd so socket_read/error can map it back to us */
1434 vui->client_fd = client_fd;
1435 hash_set (dm->vu_sw_if_index_by_sock_fd, vui->client_fd,
1436 xd->vlib_sw_if_index);
1441 // init server socket on specified sock_filename
/*
 * Create an AF_UNIX listening socket at sock_filename and register it
 * with the unix event loop.  Returns 0 on success (fd handed back via
 * *sockfd); on failure returns VNET_API_ERROR_SYSCALL_ERROR_{1,2,3}
 * for the socket/bind/listen steps respectively.
 */
1443 dpdk_vhost_user_init_server_sock (const char *sock_filename, int *sockfd)
1446 struct sockaddr_un un = { };
1448 /* create listening socket */
1449 fd = socket (AF_UNIX, SOCK_STREAM, 0);
1453 return VNET_API_ERROR_SYSCALL_ERROR_1;
1456 un.sun_family = AF_UNIX;
/* NOTE(review): unbounded strcpy -- a sock_filename longer than
 * sizeof (un.sun_path) overflows the sockaddr; length should be
 * validated (or snprintf used) before copying */
1457 strcpy ((char *) un.sun_path, (char *) sock_filename);
1459 /* remove if exists */
1460 unlink ((char *) sock_filename);
1462 if (bind (fd, (struct sockaddr *) &un, sizeof (un)) == -1)
1464 rv = VNET_API_ERROR_SYSCALL_ERROR_2;
1468 if (listen (fd, 1) == -1)
1470 rv = VNET_API_ERROR_SYSCALL_ERROR_3;
/* hand the listener to the event loop; incoming connections are
 * accepted by dpdk_vhost_user_socksvr_accept_ready */
1474 unix_file_t template = { 0 };
1475 template.read_function = dpdk_vhost_user_socksvr_accept_ready;
1476 template.file_descriptor = fd;
1477 unix_file_add (&unix_main, &template);
1487 * vhost-user interface control functions used from vpe api
/*
 * Create a dpdk vhost-user interface.  Delegates to the virtio
 * vhost-user implementation when dm->conf->use_virtio_vhost is set.
 * In server mode a listening socket is created before the device.
 */
1491 dpdk_vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
1492 const char *sock_filename,
1496 u8 renumber, u32 custom_dev_instance, u8 * hwaddr)
1498 dpdk_main_t *dm = &dpdk_main;
1504 // using virtio vhost user?
1505 if (dm->conf->use_virtio_vhost)
1507 return vhost_user_create_if (vnm, vm, sock_filename, is_server,
1508 sw_if_index, feature_mask, renumber,
1509 custom_dev_instance, hwaddr);
/* server mode: bring up the listening socket first (error path elided) */
1515 dpdk_vhost_user_init_server_sock (sock_filename, &sockfd)) != 0)
1523 // set next vhost-user if id if custom one is higher or equal
1524 if (custom_dev_instance >= dm->next_vu_if_id)
1525 dm->next_vu_if_id = custom_dev_instance + 1;
1527 dpdk_create_vhost_user_if_internal (&hw_if_idx, custom_dev_instance,
/* no renumber requested: (u32)~0 lets the allocator pick the instance */
1531 dpdk_create_vhost_user_if_internal (&hw_if_idx, (u32) ~ 0, hwaddr);
1532 DBG_SOCK ("dpdk vhost-user interface created hw_if_index %d", hw_if_idx);
1534 xd = dpdk_vhost_user_device_from_hw_if_index (hw_if_idx);
1535 ASSERT (xd != NULL);
/* initialize per-interface vhost state, then register the interface */
1537 dpdk_vhost_user_vui_init (vnm, xd, sockfd, sock_filename, is_server,
1538 feature_mask, sw_if_index);
1540 dpdk_vhost_user_vui_register (vm, xd);
/*
 * Reconfigure an existing dpdk vhost-user interface: disconnect the
 * current session, (re)create the server socket if requested, then
 * re-initialize and re-register the interface.  Delegates to the
 * virtio vhost-user implementation when use_virtio_vhost is set.
 */
1545 dpdk_vhost_user_modify_if (vnet_main_t * vnm, vlib_main_t * vm,
1546 const char *sock_filename,
1550 u8 renumber, u32 custom_dev_instance)
1552 dpdk_main_t *dm = &dpdk_main;
1554 dpdk_vu_intf_t *vui = NULL;
1559 // using virtio vhost user?
1560 if (dm->conf->use_virtio_vhost)
1562 return vhost_user_modify_if (vnm, vm, sock_filename, is_server,
1563 sw_if_index, feature_mask, renumber,
1564 custom_dev_instance);
1567 xd = dpdk_vhost_user_device_from_sw_if_index (sw_if_index);
1570 return VNET_API_ERROR_INVALID_SW_IF_INDEX;
1574 // interface is inactive
1576 // disconnect interface sockets
1577 dpdk_vhost_user_if_disconnect (xd);
/* server mode: rebuild the listening socket (error path elided) */
1582 dpdk_vhost_user_init_server_sock (sock_filename, &sockfd)) != 0)
1588 dpdk_vhost_user_vui_init (vnm, xd, sockfd, sock_filename, is_server,
1589 feature_mask, &sw_if_idx);
/* apply a caller-chosen device instance number when requested */
1593 vnet_interface_name_renumber (sw_if_idx, custom_dev_instance);
1596 dpdk_vhost_user_vui_register (vm, xd);
/*
 * Delete (deactivate) a dpdk vhost-user interface: disconnect its
 * sockets, park the device on the inactive list for later reuse, and
 * remove the ethernet interface.  Delegates to the virtio vhost-user
 * implementation when use_virtio_vhost is set.
 */
1602 dpdk_vhost_user_delete_if (vnet_main_t * vnm, vlib_main_t * vm,
1605 dpdk_main_t *dm = &dpdk_main;
1606 dpdk_device_t *xd = NULL;
1607 dpdk_vu_intf_t *vui;
1610 // using virtio vhost user?
1611 if (dm->conf->use_virtio_vhost)
1613 return vhost_user_delete_if (vnm, vm, sw_if_index);
1616 xd = dpdk_vhost_user_device_from_sw_if_index (sw_if_index);
1619 return VNET_API_ERROR_INVALID_SW_IF_INDEX;
1623 // interface is inactive
1625 // disconnect interface sockets
1626 dpdk_vhost_user_if_disconnect (xd);
1627 // add to inactive interface list
1628 vec_add1 (dm->vu_inactive_interfaces_device_index, xd->device_index);
1630 ethernet_delete_interface (vnm, xd->vlib_hw_if_index);
1631 DBG_SOCK ("deleted (deactivated) vhost-user interface sw_if_index %d",
/*
 * Collect details (features, virtio header size, socket state, name)
 * for every active dpdk vhost-user interface into a vector of
 * vhost_user_intf_details_t returned via *out_vuids.  Used by the vpe
 * API dump call.  Delegates to the virtio implementation when enabled.
 */
1638 dpdk_vhost_user_dump_ifs (vnet_main_t * vnm, vlib_main_t * vm,
1639 vhost_user_intf_details_t ** out_vuids)
1642 dpdk_main_t *dm = &dpdk_main;
1644 dpdk_vu_intf_t *vui;
1645 struct virtio_net *vhost_dev;
1646 vhost_user_intf_details_t *r_vuids = NULL;
1647 vhost_user_intf_details_t *vuid = NULL;
1648 u32 *hw_if_indices = 0;
1649 vnet_hw_interface_t *hi;
1656 // using virtio vhost user?
1657 if (dm->conf->use_virtio_vhost)
1659 return vhost_user_dump_ifs (vnm, vm, out_vuids);
/* first pass: gather hw_if_index of every active vhost-user device */
1662 vec_foreach (xd, dm->devices)
1664 if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER && xd->vu_intf->active)
1665 vec_add1 (hw_if_indices, xd->vlib_hw_if_index);
/* second pass: fill one details record per interface */
1668 for (i = 0; i < vec_len (hw_if_indices); i++)
1670 hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
1671 xd = dpdk_vhost_user_device_from_hw_if_index (hw_if_indices[i]);
1674 clib_warning ("invalid vhost-user interface hw_if_index %d",
1680 ASSERT (vui != NULL);
1681 vhost_dev = &xd->vu_vhost_dev;
/* virtio header size comes from vring 0 when any vrings exist */
1682 u32 virtio_net_hdr_sz = (vui->num_vrings > 0 ?
1683 vhost_dev->virtqueue[0]->vhost_hlen : 0);
1685 vec_add2 (r_vuids, vuid, 1);
1686 vuid->sw_if_index = xd->vlib_sw_if_index;
1687 vuid->virtio_net_hdr_sz = virtio_net_hdr_sz;
1688 vuid->features = vhost_dev->features;
1689 vuid->is_server = vui->sock_is_server;
1691 (vhost_dev->mem != NULL ? vhost_dev->mem->nregions : 0);
1692 vuid->sock_errno = vui->sock_errno;
/* NOTE(review): strncpy does not guarantee NUL termination when the
 * source fills the buffer -- presumably safe only if vec_add2 returns
 * zeroed storage; verify */
1693 strncpy ((char *) vuid->sock_filename, (char *) vui->sock_filename,
1694 ARRAY_LEN (vuid->sock_filename) - 1);
/* format the interface name into a NUL-terminated vec before copying */
1696 s = format (s, "%v%c", hi->name, 0);
1698 strncpy ((char *) vuid->if_name, (char *) s,
1699 ARRAY_LEN (vuid->if_name) - 1);
1704 vec_free (hw_if_indices);
/* caller takes ownership of the returned vector */
1706 *out_vuids = r_vuids;
1712 * Processing functions called from dpdk process fn
/* Scratch state carried across process-loop iterations for client-mode
 * reconnect attempts: a pre-built sockaddr, a spare socket fd, and a
 * unix_file_t template reused for each new connection. */
1717 struct sockaddr_un sun;
1719 unix_file_t template;
1721 } dpdk_vu_process_state;
/*
 * Allocate and zero the process-loop state, pre-create the first
 * client socket, and pre-fill the unix_file_t template used when a
 * connect succeeds.  *ctx receives the state (freed by _cleanup).
 */
1724 dpdk_vhost_user_process_init (void **ctx)
1726 dpdk_vu_process_state *state =
1727 clib_mem_alloc (sizeof (dpdk_vu_process_state))
1728 memset (state, 0, sizeof (*state));
/* NOTE(review): socket() result is not checked here; a failure would
 * only surface later when connect() is attempted -- confirm intended */
1729 state->sockfd = socket (AF_UNIX, SOCK_STREAM, 0);
1730 state->sun.sun_family = AF_UNIX;
1731 state->template.read_function = dpdk_vhost_user_socket_read;
1732 state->template.error_function = dpdk_vhost_user_socket_error;
1733 state->event_data = 0;
/* Free the process-loop state allocated by dpdk_vhost_user_process_init. */
1738 dpdk_vhost_user_process_cleanup (void *ctx)
1740 clib_mem_free (ctx);
/*
 * Periodic per-interface work for client-mode vhost-user interfaces:
 * while disconnected, retry connect() to the remote socket; while
 * connected, poll SO_ERROR to detect a dead peer and disconnect.
 */
1744 dpdk_vhost_user_process_if (vlib_main_t * vm, dpdk_device_t * xd, void *ctx)
1746 dpdk_main_t *dm = &dpdk_main;
1747 dpdk_vu_process_state *state = (dpdk_vu_process_state *) ctx;
1748 dpdk_vu_intf_t *vui = xd->vu_intf;
/* server-mode sockets accept instead of connect; skip inactive ifs */
1750 if (vui->sock_is_server || !vui->active)
1753 if (vui->unix_fd == -1)
1755 /* try to connect */
1756 strncpy (state->sun.sun_path, (char *) vui->sock_filename,
1757 sizeof (state->sun.sun_path) - 1);
1760 (state->sockfd, (struct sockaddr *) &(state->sun),
1761 sizeof (struct sockaddr_un)) == 0)
/* connected: adopt the spare fd as the session fd and register it */
1763 vui->sock_errno = 0;
1764 vui->unix_fd = state->sockfd;
1765 state->template.file_descriptor = state->sockfd;
1766 vui->unix_file_index =
1767 unix_file_add (&unix_main, &(state->template));
1768 hash_set (dm->vu_sw_if_index_by_sock_fd, state->sockfd,
1769 xd->vlib_sw_if_index);
/* the fd is now owned by the session; make a fresh spare for the
 * next connect attempt */
1771 state->sockfd = socket (AF_UNIX, SOCK_STREAM, 0);
1772 if (state->sockfd < 0)
/* connect failed: remember errno for "show vhost-user" reporting */
1777 vui->sock_errno = errno;
1782 /* check if socket is alive */
1784 socklen_t len = sizeof (error);
1786 getsockopt (vui->unix_fd, SOL_SOCKET, SO_ERROR, &error, &len);
1789 dpdk_vhost_user_if_disconnect (xd);
/*
 * CLI handler for "create vhost-user": parse socket/server/
 * feature-mask/hwaddr/renumber options and create the interface.
 */
1798 static clib_error_t *
1799 dpdk_vhost_user_connect_command_fn (vlib_main_t * vm,
1800 unformat_input_t * input,
1801 vlib_cli_command_t * cmd)
1803 dpdk_main_t *dm = &dpdk_main;
1804 unformat_input_t _line_input, *line_input = &_line_input;
1805 u8 *sock_filename = NULL;
1808 u64 feature_mask = (u64) ~ 0;
1810 u32 custom_dev_instance = ~0;
/* virtio vhost-user build: forward to the non-dpdk implementation */
1814 if (dm->conf->use_virtio_vhost)
1816 return vhost_user_connect_command_fn (vm, input, cmd);
1819 /* Get a line of input. */
1820 if (!unformat_user (input, unformat_line_input, line_input))
1823 while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
1825 if (unformat (line_input, "socket %s", &sock_filename))
1827 else if (unformat (line_input, "server"))
1829 else if (unformat (line_input, "feature-mask 0x%llx", &feature_mask))
1833 (line_input, "hwaddr %U", unformat_ethernet_address, hwaddr))
1835 else if (unformat (line_input, "renumber %d", &custom_dev_instance))
1840 return clib_error_return (0, "unknown input `%U'",
1841 format_unformat_error, input);
1843 unformat_free (line_input);
1845 vnet_main_t *vnm = vnet_get_main ();
/* "socket" is the only mandatory argument */
1846 if (sock_filename == NULL)
1847 return clib_error_return (0, "missing socket file");
1849 dpdk_vhost_user_create_if (vnm, vm, (char *) sock_filename,
1850 is_server, &sw_if_index, feature_mask,
1851 renumber, custom_dev_instance, hw);
1853 vec_free (sock_filename);
/* echo the name of the newly created interface back to the CLI */
1854 vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main (),
/* CLI registration for "create vhost-user". */
1860 VLIB_CLI_COMMAND (dpdk_vhost_user_connect_command, static) = {
1861 .path = "create vhost-user",
1862 .short_help = "create vhost-user socket <socket-filename> [server] [feature-mask <hex>] [renumber <dev_instance>]",
1863 .function = dpdk_vhost_user_connect_command_fn,
/*
 * CLI handler for "delete vhost-user": parse the sw_if_index argument
 * and delete (deactivate) the corresponding interface.
 */
1867 static clib_error_t *
1868 dpdk_vhost_user_delete_command_fn (vlib_main_t * vm,
1869 unformat_input_t * input,
1870 vlib_cli_command_t * cmd)
1872 dpdk_main_t *dm = &dpdk_main;
1873 clib_error_t *error = 0;
1874 unformat_input_t _line_input, *line_input = &_line_input;
1875 u32 sw_if_index = ~0;
/* virtio vhost-user build: forward to the non-dpdk implementation */
1877 if (dm->conf->use_virtio_vhost)
1879 return vhost_user_delete_command_fn (vm, input, cmd);
1882 /* Get a line of input. */
1883 if (!unformat_user (input, unformat_line_input, line_input))
1886 while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
1888 if (unformat (line_input, "sw_if_index %d", &sw_if_index))
1891 return clib_error_return (0, "unknown input `%U'",
1892 format_unformat_error, input);
1894 unformat_free (line_input);
/* ~0 means the required sw_if_index argument was never supplied */
1896 if (sw_if_index == ~0)
1898 error = clib_error_return (0, "invalid sw_if_index",
1899 format_unformat_error, input);
1903 vnet_main_t *vnm = vnet_get_main ();
1905 dpdk_vhost_user_delete_if (vnm, vm, sw_if_index);
/* CLI registration for "delete vhost-user". */
1911 VLIB_CLI_COMMAND (dpdk_vhost_user_delete_command, static) = {
1912 .path = "delete vhost-user",
1913 .short_help = "delete vhost-user sw_if_index <nn>",
1914 .function = dpdk_vhost_user_delete_command_fn,
/* Virtio feature bits that "show vhost-user" reports when negotiated. */
1918 #define foreach_dpdk_vhost_feature \
1919 _ (VIRTIO_NET_F_MRG_RXBUF) \
1920 _ (VIRTIO_NET_F_CTRL_VQ) \
1921 _ (VIRTIO_NET_F_CTRL_RX)
/*
 * CLI handler for "show vhost-user [<interface> ...] [descriptors]":
 * print global coalesce settings plus, per interface, negotiated
 * features, socket state, guest memory regions, vring state and
 * (with "descriptors") each enabled vring's descriptor table.
 */
1923 static clib_error_t *
1924 show_dpdk_vhost_user_command_fn (vlib_main_t * vm,
1925 unformat_input_t * input,
1926 vlib_cli_command_t * cmd)
1928 clib_error_t *error = 0;
1929 dpdk_main_t *dm = &dpdk_main;
1930 vnet_main_t *vnm = vnet_get_main ();
1932 dpdk_vu_intf_t *vui;
1933 struct virtio_net *vhost_dev;
1934 u32 hw_if_index, *hw_if_indices = 0;
1935 vnet_hw_interface_t *hi;
1938 struct virtio_memory *mem;
1944 struct feat_struct *feat_entry;
/* {name, bit} table generated from foreach_dpdk_vhost_feature */
1946 static struct feat_struct feat_array[] = {
1947 #define _(f) { .str = #f, .bit = f, },
1948 foreach_dpdk_vhost_feature
/* virtio vhost-user build: forward to the non-dpdk implementation */
1953 if (dm->conf->use_virtio_vhost)
1955 return show_vhost_user_command_fn (vm, input, cmd);
/* parse optional interface names plus the "descriptors"/"desc" flag */
1958 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1961 (input, "%U", unformat_vnet_hw_interface, vnm, &hw_if_index))
1963 vec_add1 (hw_if_indices, hw_if_index);
1964 vlib_cli_output (vm, "add %d", hw_if_index);
1966 else if (unformat (input, "descriptors") || unformat (input, "desc"))
1970 error = clib_error_return (0, "unknown input `%U'",
1971 format_unformat_error, input);
/* no interfaces named: default to all active vhost-user devices */
1975 if (vec_len (hw_if_indices) == 0)
1977 vec_foreach (xd, dm->devices)
1979 if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER && xd->vu_intf->active)
1980 vec_add1 (hw_if_indices, xd->vlib_hw_if_index);
1984 vlib_cli_output (vm, "DPDK vhost-user interfaces");
1985 vlib_cli_output (vm, "Global:\n coalesce frames %d time %e\n\n",
1986 dm->conf->vhost_coalesce_frames,
1987 dm->conf->vhost_coalesce_time);
1989 for (i = 0; i < vec_len (hw_if_indices); i++)
1991 hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
1993 if (!(xd = dpdk_vhost_user_device_from_hw_if_index (hw_if_indices[i])))
1995 error = clib_error_return (0, "not dpdk vhost-user interface: '%s'",
2000 vhost_dev = &xd->vu_vhost_dev;
2001 mem = vhost_dev->mem;
/* virtio header size comes from vring 0 when any vrings exist */
2002 u32 virtio_net_hdr_sz = (vui->num_vrings > 0 ?
2003 vhost_dev->virtqueue[0]->vhost_hlen : 0);
2005 vlib_cli_output (vm, "Interface: %v (ifindex %d)",
2006 hi->name, hw_if_indices[i]);
2008 vlib_cli_output (vm, "virtio_net_hdr_sz %d\n features (0x%llx): \n",
2009 virtio_net_hdr_sz, xd->vu_vhost_dev.features);
/* list each known feature bit that is set in the negotiated mask */
2011 feat_entry = (struct feat_struct *) &feat_array;
2012 while (feat_entry->str)
2014 if (xd->vu_vhost_dev.features & (1 << feat_entry->bit))
2015 vlib_cli_output (vm, " %s (%d)", feat_entry->str,
2020 vlib_cli_output (vm, "\n");
2022 vlib_cli_output (vm, " socket filename %s type %s errno \"%s\"\n\n",
2024 vui->sock_is_server ? "server" : "client",
2025 strerror (vui->sock_errno));
2027 vlib_cli_output (vm, " Memory regions (total %d)\n", mem->nregions);
2031 vlib_cli_output (vm,
2032 " region fd guest_phys_addr memory_size userspace_addr mmap_offset mmap_addr\n");
2033 vlib_cli_output (vm,
2034 " ====== ===== ================== ================== ================== ================== ==================\n");
2036 for (j = 0; j < mem->nregions; j++)
2038 vlib_cli_output (vm,
2039 " %d %-5d 0x%016lx 0x%016lx 0x%016lx 0x%016lx 0x%016lx\n",
2040 j, vui->region_fd[j],
2041 mem->regions[j].guest_phys_address,
2042 mem->regions[j].memory_size,
2043 mem->regions[j].userspace_address,
2044 mem->regions[j].address_offset,
2045 vui->region_addr[j]);
/* even vring index = RX, odd = TX (pairing used by the q/2 label) */
2047 for (q = 0; q < vui->num_vrings; q++)
2049 struct vhost_virtqueue *vq = vhost_dev->virtqueue[q];
2050 const char *qtype = (q & 1) ? "TX" : "RX";
2052 vlib_cli_output (vm, "\n Virtqueue %d (%s)\n", q / 2, qtype);
2054 vlib_cli_output (vm,
2055 " qsz %d last_used_idx %d last_used_idx_res %d\n",
2056 vq->size, vq->last_used_idx,
2057 vq->last_used_idx_res);
2059 if (vq->avail && vq->used)
2060 vlib_cli_output (vm,
2061 " avail.flags %x avail.idx %d used.flags %x used.idx %d\n",
2062 vq->avail->flags, vq->avail->idx,
2063 vq->used->flags, vq->used->idx);
2065 vlib_cli_output (vm, " kickfd %d callfd %d errfd %d enabled %d\n",
2066 vq->kickfd, vq->callfd, vui->vrings[q].errfd,
/* with "descriptors", dump the full table of each enabled vring */
2069 if (show_descr && vq->enabled)
2071 vlib_cli_output (vm, "\n descriptor table:\n");
2072 vlib_cli_output (vm,
2073 " id addr len flags next user_addr\n");
2074 vlib_cli_output (vm,
2075 " ===== ================== ===== ====== ===== ==================\n");
2076 for (j = 0; j < vq->size; j++)
2078 vlib_cli_output (vm,
2079 " %-5d 0x%016lx %-5d 0x%04x %-5d 0x%016lx\n",
2080 j, vq->desc[j].addr, vq->desc[j].len,
2081 vq->desc[j].flags, vq->desc[j].next,
2082 pointer_to_uword (map_guest_mem
2083 (xd, vq->desc[j].addr)));
2087 vlib_cli_output (vm, "\n");
2090 vec_free (hw_if_indices);
/* CLI registration for "show vhost-user". */
2095 VLIB_CLI_COMMAND (show_vhost_user_command, static) = {
2096 .path = "show vhost-user",
2097 .short_help = "show vhost-user interface",
2098 .function = show_dpdk_vhost_user_command_fn,
2104 * fd.io coding-style-patch-verification: ON
2107 * eval: (c-set-style "gnu")