2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <sys/socket.h>
21 #include <vlib/vlib.h>
22 #include <vlib/unix/unix.h>
24 #include <vnet/vnet.h>
25 #include <vppinfra/vec.h>
26 #include <vppinfra/error.h>
27 #include <vppinfra/format.h>
29 #include <vnet/ethernet/ethernet.h>
30 #include <vnet/devices/dpdk/dpdk.h>
32 #include <vnet/devices/virtio/vhost-user.h>
34 #define VHOST_USER_DEBUG_SOCKET 0
/* When VHOST_USER_DEBUG_SOCKET is 1, DBG_SOCK() logs through clib_warning;
 * otherwise it expands to nothing.  NOTE(review): the #else/#endif lines of
 * this conditional are elided in this excerpt.  The trailing ';' in the
 * debug expansion yields a harmless extra empty statement at call sites. */
36 #if VHOST_USER_DEBUG_SOCKET == 1
37 #define DBG_SOCK(args...) clib_warning(args);
39 #define DBG_SOCK(args...)
/* Human-readable names for vhost-user protocol message ids, indexed by the
 * VHOST_USER_* request enum.  Used only for debug logging (hence the
 * unused attribute).  NOTE(review): the closing "};" is elided in this
 * excerpt. */
44 static const char *vhost_message_str[] __attribute__((unused)) = {
45 [VHOST_USER_NONE] = "VHOST_USER_NONE",
46 [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
47 [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
48 [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
49 [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
50 [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
51 [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
52 [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
53 [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
54 [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
55 [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
56 [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
57 [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
58 [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
59 [VHOST_USER_SET_VRING_ERR] = "VHOST_USER_SET_VRING_ERR",
60 [VHOST_USER_GET_PROTOCOL_FEATURES] = "VHOST_USER_GET_PROTOCOL_FEATURES",
61 [VHOST_USER_SET_PROTOCOL_FEATURES] = "VHOST_USER_SET_PROTOCOL_FEATURES",
62 [VHOST_USER_GET_QUEUE_NUM] = "VHOST_USER_GET_QUEUE_NUM",
63 [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
/* Forward declaration: enable/disable a single vring of a vhost-user
 * interface.  The remaining parameters (queue index, enable flag) are
 * elided in this excerpt; see the definition further below. */
66 static int dpdk_vhost_user_set_vring_enable(u32 hw_if_index,
70 * DPDK vhost-user functions
73 /* portions taken from dpdk
76 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
77 * All rights reserved.
79 * Redistribution and use in source and binary forms, with or without
80 * modification, are permitted provided that the following conditions
83 * * Redistributions of source code must retain the above copyright
84 * notice, this list of conditions and the following disclaimer.
85 * * Redistributions in binary form must reproduce the above copyright
86 * notice, this list of conditions and the following disclaimer in
87 * the documentation and/or other materials provided with the
89 * * Neither the name of Intel Corporation nor the names of its
90 * contributors may be used to endorse or promote products derived
91 * from this software without specific prior written permission.
93 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
94 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
95 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
96 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
97 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
98 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
99 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
100 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
101 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
102 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
103 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/* qva_to_vva: translate a qemu (guest userspace) virtual address into a
 * host virtual address usable by vhost, by locating the guest memory
 * region whose userspace range contains the address and applying that
 * region's offsets.  Portions derived from DPDK (Intel copyright above).
 * NOTE(review): the return-type line, the not-found/return statements and
 * the closing brace are elided in this excerpt. */
108 qva_to_vva(struct virtio_net *dev, uword qemu_va)
110 struct virtio_memory_regions *region;
112 uint32_t regionidx = 0;
114 /* Find the region where the address lives. */
115 for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
116 region = &dev->mem->regions[regionidx];
117 if ((qemu_va >= region->userspace_address) &&
118 (qemu_va <= region->userspace_address +
119 region->memory_size)) {
120 vhost_va = qemu_va + region->guest_phys_address +
121 region->address_offset -
122 region->userspace_address;
/* Look up the dpdk_device_t backing a vnet hardware interface index.
 * The lookup is rejected when the device is not a vhost-user device.
 * NOTE(review): the opening brace and both return statements (failure
 * and success paths) are elided in this excerpt. */
129 static dpdk_device_t *
130 dpdk_vhost_user_device_from_hw_if_index(u32 hw_if_index)
132 vnet_main_t *vnm = vnet_get_main();
133 dpdk_main_t * dm = &dpdk_main;
134 vnet_hw_interface_t * hi = vnet_get_hw_interface (vnm, hw_if_index);
135 dpdk_device_t * xd = vec_elt_at_index (dm->devices, hi->dev_instance);
137 if (xd->dev_type != VNET_DPDK_DEV_VHOST_USER)
/* Look up the dpdk_device_t backing a vnet software interface index by
 * resolving the underlying hardware interface.  Only hardware-type
 * software interfaces are valid here (asserted).  Opening brace elided
 * in this excerpt. */
143 static dpdk_device_t *
144 dpdk_vhost_user_device_from_sw_if_index(u32 sw_if_index)
146 vnet_main_t *vnm = vnet_get_main();
147 vnet_sw_interface_t * sw = vnet_get_sw_interface (vnm, sw_if_index);
148 ASSERT (sw->type == VNET_SW_INTERFACE_TYPE_HARDWARE);
150 return dpdk_vhost_user_device_from_hw_if_index(sw->hw_if_index);
/* Mark one virtqueue of the interface disabled so the datapath stops
 * servicing it.  NOTE(review): the "dpdk_device_t *xd =" declaration
 * line and the braces are elided in this excerpt — the bare call below
 * is the tail of that initializer. */
153 static void stop_processing_packets(u32 hw_if_index, u8 idx)
156 dpdk_vhost_user_device_from_hw_if_index(hw_if_index);
158 xd->vu_vhost_dev.virtqueue[idx]->enabled = 0;
/* Disable every virtqueue of the device (queue count is qpairs *
 * VIRTIO_QNUM, i.e. RX+TX per pair) and clear the running flag.
 * NOTE(review): the declaration of 'idx' and braces are elided in this
 * excerpt. */
161 static void disable_interface(dpdk_device_t * xd)
164 int numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
165 for (idx = 0; idx < numqs; idx++)
166 xd->vu_vhost_dev.virtqueue[idx]->enabled = 0;
168 xd->vu_is_running = 0;
/* Translate a guest physical address into a host virtual address using
 * the per-region host mappings (vui->region_addr[]) built by
 * dpdk_vhost_user_set_mem_table().  Returns a pointer into the mapped
 * region on success; the failure path (after the debug log below)
 * presumably returns 0 — the line is elided in this excerpt. */
171 static inline void * map_guest_mem(dpdk_device_t * xd, uword addr)
173 dpdk_vu_intf_t * vui = xd->vu_intf;
174 struct virtio_memory * mem = xd->vu_vhost_dev.mem;
176 for (i=0; i<mem->nregions; i++) {
177 if ((mem->regions[i].guest_phys_address <= addr) &&
178 ((mem->regions[i].guest_phys_address + mem->regions[i].memory_size) > addr)) {
179 return (void *) ((uword)vui->region_addr[i] + addr - (uword)mem->regions[i].guest_phys_address);
182 DBG_SOCK("failed to map guest mem addr %lx", addr);
/* Create (or recycle) a vhost-user dpdk device.
 *
 * - Under the worker-thread barrier, first tries to reuse an interface
 *   from dm->vu_inactive_interfaces_device_index (resetting its
 *   virtqueues, locks and tx/rx vectors); otherwise allocates a fresh
 *   dpdk_device_t plus virtio memory table and virtqueue array.
 * - Number of queue pairs is 1, or one per vlib main when RSS is in use.
 * - Registers an ethernet interface (MAC from 'hwaddr', or randomized
 *   when absent — see the random block below), assigns each queue pair
 *   to an input cpu, and enables polling on the relevant threads.
 *
 * NOTE(review): many lines (braces, else branches, error checks, the
 * final return) are elided in this excerpt; in particular the
 * assignment of 'vui' before the vui->vrings[] resets, and the ethernet
 * address scaffolding around the clib_memcpy calls, are not visible —
 * confirm against the full source. */
186 static clib_error_t *
187 dpdk_create_vhost_user_if_internal (u32 * hw_if_index, u32 if_id, u8 *hwaddr)
189 dpdk_main_t * dm = &dpdk_main;
190 vlib_main_t * vm = vlib_get_main();
191 vlib_thread_main_t * tm = vlib_get_thread_main();
192 vnet_sw_interface_t * sw;
193 clib_error_t * error;
194 dpdk_device_and_queue_t * dq;
196 dpdk_vu_intf_t *vui = NULL;
198 num_qpairs = dm->use_rss < 1 ? 1 : tm->n_vlib_mains;
200 dpdk_device_t * xd = NULL;
204 vlib_worker_thread_barrier_sync (vm);
206 int inactive_cnt = vec_len(dm->vu_inactive_interfaces_device_index);
207 // if there are any inactive ifaces
208 if (inactive_cnt > 0) {
210 u32 vui_idx = dm->vu_inactive_interfaces_device_index[inactive_cnt - 1];
211 if (vec_len(dm->devices) > vui_idx) {
212 xd = vec_elt_at_index (dm->devices, vui_idx);
213 if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER) {
214 DBG_SOCK("reusing inactive vhost-user interface sw_if_index %d", xd->vlib_sw_if_index);
216 clib_warning("error: inactive vhost-user interface sw_if_index %d not VHOST_USER type!",
217 xd->vlib_sw_if_index);
218 // reset so new interface is created
222 // "remove" from inactive list
223 _vec_len(dm->vu_inactive_interfaces_device_index) -= 1;
227 // existing interface used - do not overwrite if_id if not needed
228 if (if_id != (u32)~0)
229 xd->vu_if_id = if_id;
/* Reused interface: wipe each virtqueue and invalidate its fds, and
 * clear the per-vring counters. */
233 for (j = 0; j < num_qpairs * VIRTIO_QNUM; j++) {
234 memset(xd->vu_vhost_dev.virtqueue[j], 0, sizeof(struct vhost_virtqueue));
235 xd->vu_vhost_dev.virtqueue[j]->kickfd = -1;
236 xd->vu_vhost_dev.virtqueue[j]->callfd = -1;
237 xd->vu_vhost_dev.virtqueue[j]->backend = -1;
238 vui->vrings[j].packets = 0;
239 vui->vrings[j].bytes = 0;
243 dpdk_device_lock_free(xd);
244 dpdk_device_lock_init(xd);
247 for (j = 0; j < tm->n_vlib_mains; j++)
249 vec_validate_ha (xd->tx_vectors[j], DPDK_TX_RING_SIZE,
250 sizeof(tx_ring_hdr_t), CLIB_CACHE_LINE_BYTES);
251 vec_reset_length (xd->tx_vectors[j]);
255 for (j = 0; j < xd->rx_q_used; j++)
257 vec_validate_aligned (xd->rx_vectors[j], VLIB_FRAME_SIZE-1,
258 CLIB_CACHE_LINE_BYTES);
259 vec_reset_length (xd->rx_vectors[j]);
262 // vui was not retrieved from inactive ifaces - create new
263 vec_add2_aligned (dm->devices, xd, 1, CLIB_CACHE_LINE_BYTES);
264 xd->dev_type = VNET_DPDK_DEV_VHOST_USER;
265 xd->rx_q_used = num_qpairs;
266 xd->tx_q_used = num_qpairs;
267 xd->vu_vhost_dev.virt_qp_nb = num_qpairs;
269 vec_validate_aligned (xd->rx_vectors, xd->rx_q_used, CLIB_CACHE_LINE_BYTES);
271 if (if_id == (u32)~0)
272 xd->vu_if_id = dm->next_vu_if_id++;
274 xd->vu_if_id = if_id;
276 xd->device_index = xd - dm->devices;
277 xd->per_interface_next_index = ~0;
278 xd->vu_intf = clib_mem_alloc (sizeof(*(xd->vu_intf)));
280 xd->vu_vhost_dev.mem = clib_mem_alloc (sizeof(struct virtio_memory) +
281 VHOST_MEMORY_MAX_NREGIONS *
282 sizeof(struct virtio_memory_regions));
284 /* Will be set when guest sends VHOST_USER_SET_MEM_TABLE cmd */
285 xd->vu_vhost_dev.mem->nregions = 0;
288 * New virtqueue structure is an array of VHOST_MAX_QUEUE_PAIRS * 2
289 * We need to allocate numq pairs.
292 for (j = 0; j < num_qpairs * VIRTIO_QNUM; j++) {
293 xd->vu_vhost_dev.virtqueue[j] = clib_mem_alloc (sizeof(struct vhost_virtqueue));
294 memset(xd->vu_vhost_dev.virtqueue[j], 0, sizeof(struct vhost_virtqueue));
295 xd->vu_vhost_dev.virtqueue[j]->kickfd = -1;
296 xd->vu_vhost_dev.virtqueue[j]->callfd = -1;
297 xd->vu_vhost_dev.virtqueue[j]->backend = -1;
298 vui->vrings[j].packets = 0;
299 vui->vrings[j].bytes = 0;
302 dpdk_device_lock_init(xd);
304 DBG_SOCK("tm->n_vlib_mains: %d. TX %d, RX: %d, num_qpairs: %d, Lock: %p",
305 tm->n_vlib_mains, xd->tx_q_used, xd->rx_q_used, num_qpairs, xd->lockp);
307 vec_validate_aligned (xd->tx_vectors, tm->n_vlib_mains,
308 CLIB_CACHE_LINE_BYTES);
310 for (j = 0; j < tm->n_vlib_mains; j++)
312 vec_validate_ha (xd->tx_vectors[j], DPDK_TX_RING_SIZE,
313 sizeof(tx_ring_hdr_t), CLIB_CACHE_LINE_BYTES);
314 vec_reset_length (xd->tx_vectors[j]);
318 for (j = 0; j < xd->rx_q_used; j++)
320 vec_validate_aligned (xd->rx_vectors[j], VLIB_FRAME_SIZE-1,
321 CLIB_CACHE_LINE_BYTES);
322 vec_reset_length (xd->rx_vectors[j]);
325 vec_validate_aligned (xd->frames, tm->n_vlib_mains,
326 CLIB_CACHE_LINE_BYTES);
330 * Generate random MAC address for the interface
333 clib_memcpy(addr, hwaddr, sizeof(addr));
335 f64 now = vlib_time_now(vm);
337 rnd = (u32) (now * 1e6);
338 rnd = random_u32 (&rnd);
340 clib_memcpy (addr+2, &rnd, sizeof(rnd));
345 error = ethernet_register_interface
347 dpdk_device_class.index,
349 /* ethernet address */ addr,
350 &xd->vlib_hw_if_index,
356 sw = vnet_get_hw_sw_interface (dm->vnet_main, xd->vlib_hw_if_index);
357 xd->vlib_sw_if_index = sw->sw_if_index;
359 *hw_if_index = xd->vlib_hw_if_index;
361 DBG_SOCK("xd->device_index: %d, dm->input_cpu_count: %d, "
362 "dm->input_cpu_first_index: %d\n", xd->device_index,
363 dm->input_cpu_count, dm->input_cpu_first_index);
/* Round-robin queue pairs over the input cpus and start polling where
 * needed. */
366 for (q = 0; q < num_qpairs; q++) {
367 int cpu = dm->input_cpu_first_index +
368 (next_cpu % dm->input_cpu_count);
370 unsigned lcore = vlib_worker_threads[cpu].dpdk_lcore_id;
371 vec_validate(xd->cpu_socket_id_by_queue, q);
372 xd->cpu_socket_id_by_queue[q] = rte_lcore_to_socket_id(lcore);
374 vec_add2(dm->devices_by_cpu[cpu], dq, 1);
375 dq->device = xd->device_index;
377 DBG_SOCK("CPU for %d = %d. QID: %d", *hw_if_index, cpu, dq->queue_id);
379 // start polling if it was not started yet (because of no phys ifaces)
380 if (tm->n_vlib_mains == 1 && dpdk_input_node.state != VLIB_NODE_STATE_POLLING)
381 vlib_node_set_state (vm, dpdk_input_node.index, VLIB_NODE_STATE_POLLING);
383 if (tm->n_vlib_mains > 1)
384 vlib_node_set_state (vlib_mains[cpu], dpdk_input_node.index,
385 VLIB_NODE_STATE_POLLING);
389 vlib_worker_thread_barrier_release (vm);
393 #if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
/* Query the page size backing an fd (used to round mmap/munmap lengths
 * for hugepage-backed guest memory).  The body is elided in this
 * excerpt — presumably it stats the fd's filesystem; confirm against
 * the full source.  RTE >= 16.04 only. */
394 static long get_huge_page_size(int fd)
/* Record the negotiated vhost-user protocol feature bits on the device.
 * NOTE(review): the xd declaration, the NULL check and the return are
 * elided in this excerpt. */
402 static clib_error_t *
403 dpdk_vhost_user_set_protocol_features(u32 hw_if_index, u64 prot_features)
406 xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index);
408 xd->vu_vhost_dev.protocol_features = prot_features;
/* Report the feature bits this backend supports: start from DPDK's
 * rte_vhost_feature_get() and, on RTE >= 16.04, mask out the checksum /
 * TSO offload features — see the rationale comment below.
 * NOTE(review): braces, #endif and the return are elided in this
 * excerpt. */
412 static clib_error_t *
413 dpdk_vhost_user_get_features(u32 hw_if_index, u64 * features)
415 *features = rte_vhost_feature_get();
417 #if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
418 #define OFFLOAD_FEATURES ((1ULL << VIRTIO_NET_F_HOST_TSO4) | \
419 (1ULL << VIRTIO_NET_F_HOST_TSO6) | \
420 (1ULL << VIRTIO_NET_F_CSUM) | \
421 (1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
422 (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
423 (1ULL << VIRTIO_NET_F_GUEST_TSO6))
425 /* These are not suppoted as bridging/tunneling VHOST
426 * interfaces with hardware interfaces/drivers that does
427 * not support offloading breaks L4 traffic.
429 *features &= (~OFFLOAD_FEATURES);
432 DBG_SOCK("supported features: 0x%lx", *features);
/* Apply the guest-negotiated feature bits: record them, pick the virtio
 * net header length (larger variant when MRG_RXBUF is negotiated), and
 * set vhost_hlen on every queue.  Per the spec note below, vrings are
 * enabled here when the slave did NOT negotiate F_PROTOCOL_FEATURES.
 * NOTE(review): the conditional that tests 'prot_feature' before the
 * enable call, plus braces and the return, are elided in this excerpt. */
436 static clib_error_t *
437 dpdk_vhost_user_set_features(u32 hw_if_index, u64 features)
440 u16 hdr_len = sizeof(struct virtio_net_hdr);
443 if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
444 clib_warning("not a vhost-user interface");
448 xd->vu_vhost_dev.features = features;
450 if (xd->vu_vhost_dev.features & (1 << VIRTIO_NET_F_MRG_RXBUF))
451 hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
453 int numqs = VIRTIO_QNUM;
/* NOTE(review): prot_feature is declared 'int' but holds a 64-bit mask
 * result; VHOST_USER_F_PROTOCOL_FEATURES is bit 30 so this fits, but a
 * u64 would be safer — confirm against the full source. */
455 int prot_feature = features &
456 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES);
457 numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
458 for (idx = 0; idx < numqs; idx++) {
459 xd->vu_vhost_dev.virtqueue[idx]->vhost_hlen = hdr_len;
461 * Spec says, if F_PROTOCOL_FEATURE is not set by the
462 * slave, then all the vrings should start off as
463 * enabled. If slave negotiates F_PROTOCOL_FEATURE, then
464 * slave is responsible to enable it.
467 dpdk_vhost_user_set_vring_enable(hw_if_index, idx, 1);
/* VHOST_USER_SET_MEM_TABLE handler: mmap() each guest memory region fd
 * from the message, record host addresses/offsets both in the vhost
 * virtio_memory table (for qva_to_vva) and in the vui bookkeeping (for
 * map_guest_mem / later munmap), then disable the interface until the
 * vrings are (re)configured.
 * NOTE(review): the assignment of 'vui', unmapping of previously mapped
 * regions, braces and the return are elided in this excerpt. */
473 static clib_error_t *
474 dpdk_vhost_user_set_mem_table(u32 hw_if_index, vhost_user_memory_t * vum, int fd[])
476 struct virtio_memory * mem;
479 dpdk_vu_intf_t * vui;
481 if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
482 clib_warning("not a vhost-user interface");
487 mem = xd->vu_vhost_dev.mem;
489 mem->nregions = vum->nregions;
491 for (i=0; i < mem->nregions; i++) {
492 u64 mapped_size, mapped_address;
494 mem->regions[i].guest_phys_address = vum->regions[i].guest_phys_addr;
495 mem->regions[i].guest_phys_address_end = vum->regions[i].guest_phys_addr +
496 vum->regions[i].memory_size;
497 mem->regions[i].memory_size = vum->regions[i].memory_size;
498 mem->regions[i].userspace_address = vum->regions[i].userspace_addr;
/* Map from offset 0 of the fd; mmap_offset is added back afterwards so
 * region_addr points at the start of the guest region itself. */
500 mapped_size = mem->regions[i].memory_size + vum->regions[i].mmap_offset;
501 mapped_address = pointer_to_uword(mmap(NULL, mapped_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd[i], 0));
503 if (uword_to_pointer(mapped_address, void*) == MAP_FAILED)
505 clib_warning("mmap error");
509 mapped_address += vum->regions[i].mmap_offset;
510 vui->region_addr[i] = mapped_address;
511 vui->region_fd[i] = fd[i];
512 vui->region_offset[i] = vum->regions[i].mmap_offset;
513 mem->regions[i].address_offset = mapped_address - mem->regions[i].guest_phys_address;
515 DBG_SOCK("map memory region %d addr 0x%lx off 0x%lx len 0x%lx",
516 i, vui->region_addr[i], vui->region_offset[i], mapped_size);
518 if (vum->regions[i].guest_phys_addr == 0) {
519 mem->base_address = vum->regions[i].userspace_addr;
520 mem->mapped_address = mem->regions[i].address_offset;
524 disable_interface(xd);
/* VHOST_USER_SET_VRING_NUM handler: record the ring size for queue idx
 * and pause the queue until it is fully re-established.
 * NOTE(review): the assignment of 'num' into the vq (vq->size) and the
 * return are elided in this excerpt. */
528 static clib_error_t *
529 dpdk_vhost_user_set_vring_num(u32 hw_if_index, u8 idx, u32 num)
532 struct vhost_virtqueue *vq;
534 DBG_SOCK("idx %u num %u", idx, num);
536 if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
537 clib_warning("not a vhost-user interface");
540 vq = xd->vu_vhost_dev.virtqueue[idx];
543 stop_processing_packets(hw_if_index, idx);
/* VHOST_USER_SET_VRING_ADDR handler: translate the guest's desc/used/
 * avail ring addresses to host pointers via qva_to_vva(), resynchronize
 * last_used_idx with the ring if they diverged (e.g. after reconnect),
 * disable guest->host kick notifications for performance, and pause the
 * queue.  NOTE(review): the "falied" typo below is inside a runtime log
 * string and is preserved verbatim here; fix it in a code change, not a
 * comment pass. */
548 static clib_error_t *
549 dpdk_vhost_user_set_vring_addr(u32 hw_if_index, u8 idx, uword desc, \
550 uword used, uword avail, uword log)
553 struct vhost_virtqueue *vq;
555 DBG_SOCK("idx %u desc 0x%lx used 0x%lx avail 0x%lx log 0x%lx",
556 idx, desc, used, avail, log);
558 if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
559 clib_warning("not a vhost-user interface");
562 vq = xd->vu_vhost_dev.virtqueue[idx];
564 vq->desc = (struct vring_desc *) qva_to_vva(&xd->vu_vhost_dev, desc);
565 vq->used = (struct vring_used *) qva_to_vva(&xd->vu_vhost_dev, used);
566 vq->avail = (struct vring_avail *) qva_to_vva(&xd->vu_vhost_dev, avail);
567 #if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
568 vq->log_guest_addr = log;
571 if (!(vq->desc && vq->used && vq->avail)) {
572 clib_warning("falied to set vring addr");
575 if (vq->last_used_idx != vq->used->idx) {
576 clib_warning("last_used_idx (%u) and vq->used->idx (%u) mismatches; "
577 "some packets maybe resent for Tx and dropped for Rx",
578 vq->last_used_idx, vq->used->idx);
579 vq->last_used_idx = vq->used->idx;
580 vq->last_used_idx_res = vq->used->idx;
584 * Inform the guest that there is no need to inform (kick) the
585 * host when it adds buffers. kick results in vmexit and will
586 * incur performance degradation.
588 * The below function sets a flag in used table. Therefore,
589 * should be initialized after initializing vq->used.
591 rte_vhost_enable_guest_notification(&xd->vu_vhost_dev, idx, 0);
592 stop_processing_packets(hw_if_index, idx);
/* VHOST_USER_GET_VRING_BASE handler: report last_used_idx for the queue
 * and stop the ring, per the protocol note below.  Also resets the local
 * vring copy and, once every queue is disabled, marks the whole device
 * not running.  NOTE(review): several lines (braces, the loop's 'break',
 * kickfd cleanup between the callfd reset and the #if) are elided in
 * this excerpt; also note 'idx' is reused below as the loop variable. */
597 static clib_error_t *
598 dpdk_vhost_user_get_vring_base(u32 hw_if_index, u8 idx, u32 * num)
601 struct vhost_virtqueue *vq;
603 if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
604 clib_warning("not a vhost-user interface");
608 vq = xd->vu_vhost_dev.virtqueue[idx];
609 *num = vq->last_used_idx;
613 * Client must start ring upon receiving a kick
614 * (that is, detecting that file descriptor is readable)
615 * on the descriptor specified by VHOST_USER_SET_VRING_KICK,
616 * and stop ring upon receiving VHOST_USER_GET_VRING_BASE.
618 DBG_SOCK("Stopping vring Q %u of device %d", idx, hw_if_index);
619 dpdk_vu_intf_t *vui = xd->vu_intf;
620 vui->vrings[idx].enabled = 0; /* Reset local copy */
621 vui->vrings[idx].callfd = -1; /* Reset FD */
626 #if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
627 vq->log_guest_addr = 0;
630 /* Check if all Qs are disabled */
631 int numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
632 for (idx = 0; idx < numqs; idx++) {
633 if (xd->vu_vhost_dev.virtqueue[idx]->enabled)
637 /* If all vrings are disabed then disable device */
639 DBG_SOCK("Device %d disabled", hw_if_index);
640 xd->vu_is_running = 0;
/* VHOST_USER_SET_VRING_BASE handler: seed last_used_idx (and its
 * reservation copy) from the guest-supplied value, then pause the queue
 * until it is kicked.  Return elided in this excerpt. */
646 static clib_error_t *
647 dpdk_vhost_user_set_vring_base(u32 hw_if_index, u8 idx, u32 num)
650 struct vhost_virtqueue *vq;
652 DBG_SOCK("idx %u num %u", idx, num);
654 if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
655 clib_warning("not a vhost-user interface");
659 vq = xd->vu_vhost_dev.virtqueue[idx];
660 vq->last_used_idx = num;
661 vq->last_used_idx_res = num;
663 stop_processing_packets(hw_if_index, idx);
/* VHOST_USER_SET_VRING_KICK handler: after a kick fd arrives for queue
 * idx, (re)evaluate whether that queue is fully usable (addresses set
 * and locally enabled), then mark the device running when at least one
 * RX/TX queue pair is enabled; if the interface is also admin-up, raise
 * the link-up flags.  NOTE(review): storage of the kick fd itself and
 * several braces/returns are elided in this excerpt — the fd parameter
 * is only logged in the visible lines. */
668 static clib_error_t *
669 dpdk_vhost_user_set_vring_kick(u32 hw_if_index, u8 idx, int fd)
671 dpdk_main_t * dm = &dpdk_main;
673 dpdk_vu_vring *vring;
674 struct vhost_virtqueue *vq0, *vq1, *vq;
675 int index, vu_is_running = 0;
677 if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
678 clib_warning("not a vhost-user interface");
682 vq = xd->vu_vhost_dev.virtqueue[idx];
685 vring = &xd->vu_intf->vrings[idx];
686 vq->enabled = (vq->desc && vq->avail && vq->used && vring->enabled) ? 1 : 0;
689 * Set xd->vu_is_running if at least one pair of
690 * RX/TX queues are enabled.
692 int numqs = VIRTIO_QNUM;
693 numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
/* Queues come in RX/TX pairs: even index RX, odd index TX. */
695 for (index = 0; index < numqs; index += 2) {
696 vq0 = xd->vu_vhost_dev.virtqueue[index]; /* RX */
697 vq1 = xd->vu_vhost_dev.virtqueue[index + 1]; /* TX */
698 if (vq0->enabled && vq1->enabled)
704 DBG_SOCK("SET_VRING_KICK - idx %d, running %d, fd: %d",
705 idx, vu_is_running, fd);
707 xd->vu_is_running = vu_is_running;
708 if (xd->vu_is_running && xd->admin_up) {
709 vnet_hw_interface_set_flags (dm->vnet_main,
710 xd->vlib_hw_if_index, VNET_HW_INTERFACE_FLAG_LINK_UP |
711 ETH_LINK_FULL_DUPLEX );
/* VHOST_USER_SET_VRING_ENABLE handler: save the enable flag in the local
 * vring copy (guests may enable a queue before setting its addresses —
 * see the comment below), and reflect it into the vq only once
 * desc/avail/used are all set.
 * NOTE(review): the return type line, the 'vui' assignment, and several
 * control-flow lines are elided in this excerpt.  In particular
 * "vui->vrings[numqs].enabled" below indexes with the queue COUNT, which
 * looks out of range for a per-queue array — in context this appears to
 * be the body of an elided loop whose counter shadows/reuses 'numqs';
 * confirm against the full source. */
718 dpdk_vhost_user_set_vring_enable(u32 hw_if_index, u8 idx, int enable)
721 struct vhost_virtqueue *vq;
724 if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
725 clib_warning("not a vhost-user interface");
731 * Guest vhost driver wrongly enables queue before
732 * setting the vring address. Therefore, save a
733 * local copy. Reflect it in vq structure if addresses
734 * are set. If not, vq will be enabled when vring
737 vui->vrings[idx].enabled = enable; /* Save local copy */
739 int numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
741 if (! vui->vrings[numqs].enabled)
745 if (numqs == -1) /* All Qs are enabled */
750 vq = xd->vu_vhost_dev.virtqueue[idx];
751 if (vq->desc && vq->avail && vq->used)
752 xd->vu_vhost_dev.virtqueue[idx]->enabled = enable;
/* unix_file read callback for a vring call eventfd: drain the 8-byte
 * eventfd counter; the value itself is ignored (n is deliberately
 * unused).  NOTE(review): the declaration of 'buff' and the return are
 * elided in this excerpt. */
757 static clib_error_t * dpdk_vhost_user_callfd_read_ready (unix_file_t * uf)
759 __attribute__((unused)) int n;
761 n = read(uf->file_descriptor, ((char*)&buff), 8);
/* VHOST_USER_SET_VRING_CALL handler: install 'fd' as the interrupt
 * ("call") eventfd for queue idx.  Any previous callfd's unix file
 * watcher is deleted, a read watcher (dpdk_vhost_user_callfd_read_ready)
 * is registered for the new fd, and vq->callfd is left at -1 because the
 * locally saved vring callfd is used instead (comment below).
 * NOTE(review): the fd-validity branches, braces and the return are
 * elided in this excerpt. */
765 static clib_error_t *
766 dpdk_vhost_user_set_vring_call(u32 hw_if_index, u8 idx, int fd)
769 struct vhost_virtqueue *vq;
770 unix_file_t template = {0};
772 DBG_SOCK("SET_VRING_CALL - idx %d, fd %d", idx, fd);
774 if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
775 clib_warning("not a vhost-user interface");
779 dpdk_vu_intf_t *vui = xd->vu_intf;
781 /* if there is old fd, delete it */
782 if (vui->vrings[idx].callfd > 0) {
783 unix_file_t * uf = pool_elt_at_index (unix_main.file_pool,
784 vui->vrings[idx].callfd_idx);
785 unix_file_del (&unix_main, uf);
787 vui->vrings[idx].callfd = fd;
788 template.read_function = dpdk_vhost_user_callfd_read_ready;
789 template.file_descriptor = fd;
790 vui->vrings[idx].callfd_idx = unix_file_add (&unix_main, &template);
792 vq = xd->vu_vhost_dev.virtqueue[idx];
793 vq->callfd = -1; /* We use locally saved vring->callfd; */
/* Return non-zero when the guest wants an interrupt for queue idx: a
 * valid callfd is installed AND the guest has not set
 * VRING_AVAIL_F_NO_INTERRUPT in the avail ring flags.
 * NOTE(review): the return type line, braces and the early-return for
 * num_vrings <= 0 are elided in this excerpt. */
799 dpdk_vhost_user_want_interrupt(dpdk_device_t *xd, int idx)
801 dpdk_vu_intf_t *vui = xd->vu_intf;
804 if (PREDICT_FALSE(vui->num_vrings <= 0))
807 dpdk_vu_vring *vring = &(vui->vrings[idx]);
808 struct vhost_virtqueue *vq = xd->vu_vhost_dev.virtqueue[idx];
810 /* return if vm is interested in interrupts */
811 return (vring->callfd > 0) && !(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
/* Signal the guest for queue idx — eventfd_write(1) on the vring's
 * callfd — unless the guest suppressed interrupts via
 * VRING_AVAIL_F_NO_INTERRUPT, then reset the interrupt-coalescing
 * counters using the configured vhost_coalesce_time.
 * NOTE(review): the return type line, braces and the early-return are
 * elided in this excerpt. */
815 dpdk_vhost_user_send_interrupt(vlib_main_t * vm, dpdk_device_t * xd, int idx)
817 dpdk_main_t * dm = &dpdk_main;
818 dpdk_vu_intf_t *vui = xd->vu_intf;
821 if (PREDICT_FALSE(vui->num_vrings <= 0))
824 dpdk_vu_vring *vring = &(vui->vrings[idx]);
825 struct vhost_virtqueue *vq = xd->vu_vhost_dev.virtqueue[idx];
827 /* if vm is interested in interrupts */
828 if((vring->callfd > 0) && !(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
829 eventfd_write(vring->callfd, (eventfd_t)1);
830 vring->n_since_last_int = 0;
831 vring->int_deadline = vlib_time_now(vm) + dm->conf->vhost_coalesce_time;
836 * vhost-user interface management functions
839 // initialize vui with specified attributes
/* Initialize the vui bookkeeping for a (re)connecting vhost-user
 * interface: zero the struct, record the socket fd, queue count, server
 * role, socket path and feature mask, mark the unix file slot unused,
 * bring the hw interface down until the guest connects, and report the
 * sw_if_index back to the caller.
 * NOTE(review): the return type line and the final parameter(s) of the
 * signature — including the u32* that 864 writes through — are elided in
 * this excerpt.  strncpy here relies on the preceding memset for NUL
 * termination. */
841 dpdk_vhost_user_vui_init(vnet_main_t * vnm,
842 dpdk_device_t *xd, int sockfd,
843 const char * sock_filename,
844 u8 is_server, u64 feature_mask,
847 dpdk_vu_intf_t *vui = xd->vu_intf;
848 memset(vui, 0, sizeof(*vui));
850 vui->unix_fd = sockfd;
851 vui->num_vrings = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
852 DBG_SOCK("dpdk_vhost_user_vui_init VRINGS: %d", vui->num_vrings);
853 vui->sock_is_server = is_server;
854 strncpy(vui->sock_filename, sock_filename, ARRAY_LEN(vui->sock_filename)-1);
857 vui->feature_mask = feature_mask;
859 vui->unix_file_index = ~0;
861 vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);
864 *sw_if_index = xd->vlib_sw_if_index;
867 // register vui and start polling on it
/* Register the interface's listener fd -> sw_if_index mapping so the
 * socket server can route connections to this interface.
 * NOTE(review): despite the comment above, only the hash_set is visible
 * here — any polling startup lines, the return type line and braces are
 * elided in this excerpt. */
869 dpdk_vhost_user_vui_register(vlib_main_t * vm, dpdk_device_t *xd)
871 dpdk_main_t * dm = &dpdk_main;
872 dpdk_vu_intf_t *vui = xd->vu_intf;
874 hash_set (dm->vu_sw_if_index_by_listener_fd, vui->unix_fd,
875 xd->vlib_sw_if_index);
/* munmap() every guest memory region previously mapped by
 * dpdk_vhost_user_set_mem_table(): round the length up using the
 * region's backing page size, unmap from the original (pre-offset)
 * address, close the backing fd and mark the slot free (addr = -1).
 * NOTE(review): braces, the error branch structure, and the #if gating
 * around get_huge_page_size (RTE-version dependent in appearance) are
 * elided in this excerpt. */
878 static void dpdk_unmap_all_mem_regions(dpdk_device_t * xd)
881 dpdk_vu_intf_t *vui = xd->vu_intf;
882 struct virtio_memory * mem = xd->vu_vhost_dev.mem;
884 for (i=0; i<mem->nregions; i++) {
885 if (vui->region_addr[i] != -1) {
887 long page_sz = get_huge_page_size(vui->region_fd[i]);
889 ssize_t map_sz = (mem->regions[i].memory_size +
890 vui->region_offset[i] + page_sz) & ~(page_sz - 1);
892 r = munmap((void *)(vui->region_addr[i] - vui->region_offset[i]), map_sz);
894 DBG_SOCK("unmap memory region %d addr 0x%lx off 0x%lx len 0x%lx page_sz 0x%x",
895 i, vui->region_addr[i], vui->region_offset[i], map_sz, page_sz);
897 vui->region_addr[i]= -1;
900 clib_unix_warning("failed to unmap memory region");
902 close(vui->region_fd[i]);
/* Tear down a guest connection: bring the hw interface down, remove the
 * socket's unix file watcher and both fd hash entries, reset every
 * vring's local state (and, on RTE >= 16.04, its log base), mark the
 * device not running, and unmap all guest memory.  The dpdk_device_t
 * itself is presumably retained for reuse — the inactive-list handling
 * is not visible here; confirm against the full source.
 * NOTE(review): the return type line, 'q' declaration, per-queue fd
 * cleanup between 934 and the #if, and closing braces are elided in
 * this excerpt. */
909 dpdk_vhost_user_if_disconnect(dpdk_device_t * xd)
911 dpdk_vu_intf_t *vui = xd->vu_intf;
912 vnet_main_t * vnm = vnet_get_main();
913 dpdk_main_t * dm = &dpdk_main;
914 struct vhost_virtqueue *vq;
918 vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);
920 if (vui->unix_file_index != ~0) {
921 unix_file_del (&unix_main, unix_main.file_pool + vui->unix_file_index);
922 vui->unix_file_index = ~0;
925 hash_unset(dm->vu_sw_if_index_by_sock_fd, vui->unix_fd);
926 hash_unset(dm->vu_sw_if_index_by_listener_fd, vui->unix_fd);
931 for (q = 0; q < vui->num_vrings; q++) {
932 vq = xd->vu_vhost_dev.virtqueue[q];
933 vui->vrings[q].enabled = 0; /* Reset local copy */
934 vui->vrings[q].callfd = -1; /* Reset FD */
936 #if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
937 vq->log_guest_addr = 0;
943 xd->vu_is_running = 0;
945 dpdk_unmap_all_mem_regions(xd);
946 DBG_SOCK("interface ifindex %d disconnected", xd->vlib_sw_if_index);
949 static clib_error_t * dpdk_vhost_user_socket_read (unix_file_t * uf)
952 int fd, number_of_fds = 0;
953 int fds[VHOST_MEMORY_MAX_NREGIONS];
954 vhost_user_msg_t msg;
957 dpdk_main_t * dm = &dpdk_main;
960 struct cmsghdr *cmsg;
963 vnet_main_t * vnm = vnet_get_main();
965 p = hash_get (dm->vu_sw_if_index_by_sock_fd, uf->file_descriptor);
967 DBG_SOCK ("FD %d doesn't belong to any interface",
968 uf->file_descriptor);
972 xd = dpdk_vhost_user_device_from_sw_if_index(p[0]);
977 char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))];
979 memset(&mh, 0, sizeof(mh));
980 memset(control, 0, sizeof(control));
982 /* set the payload */
983 iov[0].iov_base = (void *) &msg;
984 iov[0].iov_len = VHOST_USER_MSG_HDR_SZ;
988 mh.msg_control = control;
989 mh.msg_controllen = sizeof(control);
991 n = recvmsg(uf->file_descriptor, &mh, 0);
993 if (n != VHOST_USER_MSG_HDR_SZ)
996 if (mh.msg_flags & MSG_CTRUNC) {
1000 cmsg = CMSG_FIRSTHDR(&mh);
1002 if (cmsg && (cmsg->cmsg_len > 0) && (cmsg->cmsg_level == SOL_SOCKET) &&
1003 (cmsg->cmsg_type == SCM_RIGHTS) &&
1004 (cmsg->cmsg_len - CMSG_LEN(0) <= VHOST_MEMORY_MAX_NREGIONS * sizeof(int))) {
1005 number_of_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
1006 clib_memcpy(fds, CMSG_DATA(cmsg), number_of_fds * sizeof(int));
1009 /* version 1, no reply bit set*/
1010 if ((msg.flags & 7) != 1) {
1011 DBG_SOCK("malformed message received. closing socket");
1016 int rv __attribute__((unused));
1017 /* $$$$ pay attention to rv */
1018 rv = read(uf->file_descriptor, ((char*)&msg) + n, msg.size);
1021 DBG_SOCK("VPP VHOST message %s", vhost_message_str[msg.request]);
1022 switch (msg.request) {
1023 case VHOST_USER_GET_FEATURES:
1024 DBG_SOCK("if %d msg VHOST_USER_GET_FEATURES",
1025 xd->vlib_hw_if_index);
1027 msg.flags |= VHOST_USER_REPLY_MASK;
1029 dpdk_vhost_user_get_features(xd->vlib_hw_if_index, &msg.u64);
1030 msg.u64 &= vui->feature_mask;
1031 msg.size = sizeof(msg.u64);
1034 case VHOST_USER_SET_FEATURES:
1035 DBG_SOCK("if %d msg VHOST_USER_SET_FEATURES features 0x%016lx",
1036 xd->vlib_hw_if_index, msg.u64);
1038 dpdk_vhost_user_set_features(xd->vlib_hw_if_index, msg.u64);
1041 case VHOST_USER_SET_MEM_TABLE:
1042 DBG_SOCK("if %d msg VHOST_USER_SET_MEM_TABLE nregions %d",
1043 xd->vlib_hw_if_index, msg.memory.nregions);
1045 if ((msg.memory.nregions < 1) ||
1046 (msg.memory.nregions > VHOST_MEMORY_MAX_NREGIONS)) {
1048 DBG_SOCK("number of mem regions must be between 1 and %i",
1049 VHOST_MEMORY_MAX_NREGIONS);
1054 if (msg.memory.nregions != number_of_fds) {
1055 DBG_SOCK("each memory region must have FD");
1059 dpdk_vhost_user_set_mem_table(xd->vlib_hw_if_index, &msg.memory, fds);
1062 case VHOST_USER_SET_VRING_NUM:
1063 DBG_SOCK("if %d msg VHOST_USER_SET_VRING_NUM idx %d num %d",
1064 xd->vlib_hw_if_index, msg.state.index, msg.state.num);
1066 if ((msg.state.num > 32768) || /* maximum ring size is 32768 */
1067 (msg.state.num == 0) || /* it cannot be zero */
1068 (msg.state.num % 2)) /* must be power of 2 */
1071 dpdk_vhost_user_set_vring_num(xd->vlib_hw_if_index, msg.state.index, msg.state.num);
1074 case VHOST_USER_SET_VRING_ADDR:
1075 DBG_SOCK("if %d msg VHOST_USER_SET_VRING_ADDR idx %d",
1076 xd->vlib_hw_if_index, msg.state.index);
1078 dpdk_vhost_user_set_vring_addr(xd->vlib_hw_if_index, msg.state.index,
1079 msg.addr.desc_user_addr,
1080 msg.addr.used_user_addr,
1081 msg.addr.avail_user_addr,
1082 msg.addr.log_guest_addr);
1085 case VHOST_USER_SET_OWNER:
1086 DBG_SOCK("if %d msg VHOST_USER_SET_OWNER",
1087 xd->vlib_hw_if_index);
1090 case VHOST_USER_RESET_OWNER:
1091 DBG_SOCK("if %d msg VHOST_USER_RESET_OWNER",
1092 xd->vlib_hw_if_index);
1095 case VHOST_USER_SET_VRING_CALL:
1096 q = (u8) (msg.u64 & 0xFF);
1098 DBG_SOCK("if %d msg VHOST_USER_SET_VRING_CALL u64 %lx, idx: %d",
1099 xd->vlib_hw_if_index, msg.u64, q);
1101 if (!(msg.u64 & 0x100))
1103 if (number_of_fds != 1)
1109 dpdk_vhost_user_set_vring_call(xd->vlib_hw_if_index, q, fd);
1113 case VHOST_USER_SET_VRING_KICK:
1115 q = (u8) (msg.u64 & 0xFF);
1117 DBG_SOCK("if %d msg VHOST_USER_SET_VRING_KICK u64 %lx, idx: %d",
1118 xd->vlib_hw_if_index, msg.u64, q);
1120 if (!(msg.u64 & 0x100))
1122 if (number_of_fds != 1)
1125 vui->vrings[q].kickfd = fds[0];
1128 vui->vrings[q].kickfd = -1;
1130 dpdk_vhost_user_set_vring_kick(xd->vlib_hw_if_index, q, vui->vrings[q].kickfd);
1133 case VHOST_USER_SET_VRING_ERR:
1135 q = (u8) (msg.u64 & 0xFF);
1137 DBG_SOCK("if %d msg VHOST_USER_SET_VRING_ERR u64 %lx, idx: %d",
1138 xd->vlib_hw_if_index, msg.u64, q);
1140 if (!(msg.u64 & 0x100))
1142 if (number_of_fds != 1)
1150 vui->vrings[q].errfd = fd;
1153 case VHOST_USER_SET_VRING_BASE:
1154 DBG_SOCK("if %d msg VHOST_USER_SET_VRING_BASE idx %d num %d",
1155 xd->vlib_hw_if_index, msg.state.index, msg.state.num);
1157 dpdk_vhost_user_set_vring_base(xd->vlib_hw_if_index, msg.state.index, msg.state.num);
1160 case VHOST_USER_GET_VRING_BASE:
1161 DBG_SOCK("if %d msg VHOST_USER_GET_VRING_BASE idx %d num %d",
1162 xd->vlib_hw_if_index, msg.state.index, msg.state.num);
1164 msg.flags |= VHOST_USER_REPLY_MASK;
1165 msg.size = sizeof(msg.state);
1167 dpdk_vhost_user_get_vring_base(xd->vlib_hw_if_index, msg.state.index, &msg.state.num);
1170 case VHOST_USER_NONE:
1171 DBG_SOCK("if %d msg VHOST_USER_NONE",
1172 xd->vlib_hw_if_index);
1175 case VHOST_USER_SET_LOG_BASE:
1176 #if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
1177 DBG_SOCK("if %d msg VHOST_USER_SET_LOG_BASE",
1178 xd->vlib_hw_if_index);
1180 if (msg.size != sizeof(msg.log)) {
1181 DBG_SOCK("invalid msg size for VHOST_USER_SET_LOG_BASE: %u instead of %lu",
1182 msg.size, sizeof(msg.log));
1186 if (!(xd->vu_vhost_dev.protocol_features & (1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD))) {
1187 DBG_SOCK("VHOST_USER_PROTOCOL_F_LOG_SHMFD not set but VHOST_USER_SET_LOG_BASE received");
1192 /* align size to 2M page */
1193 long page_sz = get_huge_page_size(fd);
1194 ssize_t map_sz = (msg.log.size + msg.log.offset + page_sz) & ~(page_sz - 1);
1196 void *addr = mmap(0, map_sz, PROT_READ | PROT_WRITE,
1199 DBG_SOCK("map log region addr 0 len 0x%lx off 0x%lx fd %d mapped %p",
1200 map_sz, msg.log.offset, fd, addr);
1202 if (addr == MAP_FAILED) {
1203 clib_warning("failed to map memory. errno is %d", errno);
1207 xd->vu_vhost_dev.log_base += pointer_to_uword(addr) + msg.log.offset;
1208 xd->vu_vhost_dev.log_size = msg.log.size;
1209 msg.flags |= VHOST_USER_REPLY_MASK;
1210 msg.size = sizeof(msg.u64);
1212 DBG_SOCK("if %d msg VHOST_USER_SET_LOG_BASE Not-Implemented",
1213 xd->vlib_hw_if_index);
1217 case VHOST_USER_SET_LOG_FD:
1218 DBG_SOCK("if %d msg VHOST_USER_SET_LOG_FD",
1219 xd->vlib_hw_if_index);
1222 case VHOST_USER_GET_PROTOCOL_FEATURES:
1223 DBG_SOCK("if %d msg VHOST_USER_GET_PROTOCOL_FEATURES",
1224 xd->vlib_hw_if_index);
1226 msg.flags |= VHOST_USER_REPLY_MASK;
1227 msg.u64 = VHOST_USER_PROTOCOL_FEATURES;
1228 DBG_SOCK("VHOST_USER_PROTOCOL_FEATURES: %llx", VHOST_USER_PROTOCOL_FEATURES);
1229 msg.size = sizeof(msg.u64);
1232 case VHOST_USER_SET_PROTOCOL_FEATURES:
1233 DBG_SOCK("if %d msg VHOST_USER_SET_PROTOCOL_FEATURES",
1234 xd->vlib_hw_if_index);
1236 DBG_SOCK("VHOST_USER_SET_PROTOCOL_FEATURES: 0x%lx",
1238 dpdk_vhost_user_set_protocol_features(xd->vlib_hw_if_index,
1242 case VHOST_USER_SET_VRING_ENABLE:
1243 DBG_SOCK("%d VPP VHOST_USER_SET_VRING_ENABLE IDX: %d, Enable: %d",
1244 xd->vlib_hw_if_index, msg.state.index, msg.state.num);
1245 dpdk_vhost_user_set_vring_enable
1246 (xd->vlib_hw_if_index, msg.state.index, msg.state.num);
1249 case VHOST_USER_GET_QUEUE_NUM:
1250 DBG_SOCK("if %d msg VHOST_USER_GET_QUEUE_NUM:",
1251 xd->vlib_hw_if_index);
1253 msg.flags |= VHOST_USER_REPLY_MASK;
1254 msg.u64 = xd->vu_vhost_dev.virt_qp_nb;
1255 msg.size = sizeof(msg.u64);
1259 DBG_SOCK("unknown vhost-user message %d received. closing socket",
1264 /* if we have pointers to descriptor table, go up*/
1266 xd->vu_vhost_dev.virtqueue[VHOST_NET_VRING_IDX_TX]->desc &&
1267 xd->vu_vhost_dev.virtqueue[VHOST_NET_VRING_IDX_RX]->desc) {
1269 DBG_SOCK("interface %d connected", xd->vlib_sw_if_index);
1271 vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, VNET_HW_INTERFACE_FLAG_LINK_UP);
1276 /* if we need to reply */
1277 if (msg.flags & VHOST_USER_REPLY_MASK)
1279 n = send(uf->file_descriptor, &msg, VHOST_USER_MSG_HDR_SZ + msg.size, 0);
1280 if (n != (msg.size + VHOST_USER_MSG_HDR_SZ))
1287 DBG_SOCK("error: close_socket");
1288 dpdk_vhost_user_if_disconnect(xd);
/* unix_file_t error callback for a connected vhost-user client socket.
 * Maps the failing fd back to its interface and tears the session down.
 * NOTE(review): this chunk is an elided extraction — the declarations of
 * `p`/`xd` and the early return for an unknown fd are not visible here;
 * verify against the full file. */
1292 static clib_error_t * dpdk_vhost_user_socket_error (unix_file_t * uf)
1294 dpdk_main_t * dm = &dpdk_main;
/* socket fd -> sw_if_index lookup */
1298 p = hash_get (dm->vu_sw_if_index_by_sock_fd, uf->file_descriptor);
1300 DBG_SOCK ("FD %d doesn't belong to any interface",
1301 uf->file_descriptor);
1305 xd = dpdk_vhost_user_device_from_sw_if_index(p[0]);
/* unmap guest memory, reset vrings and unregister the unix file */
1307 dpdk_vhost_user_if_disconnect(xd);
/* Accept-readiness callback for a vhost-user *server* listening socket:
 * accept the incoming client (QEMU) connection, register the new fd for
 * read/error events, and record the fd -> sw_if_index mapping so later
 * protocol messages can be demuxed to the right interface. */
1311 static clib_error_t * dpdk_vhost_user_socksvr_accept_ready (unix_file_t * uf)
1313 int client_fd, client_len;
1314 struct sockaddr_un client;
1315 unix_file_t template = {0};
1316 dpdk_main_t * dm = &dpdk_main;
1317 dpdk_device_t * xd = NULL;
1318 dpdk_vu_intf_t * vui;
/* which interface owns this listener fd? */
1321 p = hash_get (dm->vu_sw_if_index_by_listener_fd,
1322 uf->file_descriptor);
1324 DBG_SOCK ("fd %d doesn't belong to any interface",
1325 uf->file_descriptor);
1329 xd = dpdk_vhost_user_device_from_sw_if_index(p[0]);
1333 client_len = sizeof(client);
1334 client_fd = accept (uf->file_descriptor,
1335 (struct sockaddr *)&client,
1336 (socklen_t *)&client_len);
/* accept(2) failure is surfaced as a clib error carrying errno */
1339 return clib_error_return_unix (0, "accept");
/* poll the accepted connection for vhost-user protocol messages */
1341 template.read_function = dpdk_vhost_user_socket_read;
1342 template.error_function = dpdk_vhost_user_socket_error;
1343 template.file_descriptor = client_fd;
1344 vui->unix_file_index = unix_file_add (&unix_main, &template);
1346 vui->client_fd = client_fd;
1347 hash_set (dm->vu_sw_if_index_by_sock_fd, vui->client_fd,
1348 xd->vlib_sw_if_index);
1353 // init server socket on specified sock_filename
1354 static int dpdk_vhost_user_init_server_sock(const char * sock_filename, int *sockfd)
1357 struct sockaddr_un un = {};
1359 /* create listening socket */
1360 fd = socket(AF_UNIX, SOCK_STREAM, 0);
1363 return VNET_API_ERROR_SYSCALL_ERROR_1;
1366 un.sun_family = AF_UNIX;
1367 strcpy((char *) un.sun_path, (char *) sock_filename);
1369 /* remove if exists */
1370 unlink( (char *) sock_filename);
1372 if (bind(fd, (struct sockaddr *) &un, sizeof(un)) == -1) {
1373 rv = VNET_API_ERROR_SYSCALL_ERROR_2;
1377 if (listen(fd, 1) == -1) {
1378 rv = VNET_API_ERROR_SYSCALL_ERROR_3;
1382 unix_file_t template = {0};
1383 template.read_function = dpdk_vhost_user_socksvr_accept_ready;
1384 template.file_descriptor = fd;
1385 unix_file_add (&unix_main, &template);
1395 * vhost-user interface control functions used from vpe api
/* API entry point: create a DPDK vhost-user interface bound to
 * sock_filename. Delegates to the virtio (non-DPDK) implementation when
 * `use_virtio_vhost` is configured. On server mode a listening socket is
 * created first; the interface itself is then created and initialized.
 * NOTE(review): extraction elided several lines (declarations, the
 * is_server branch around the server-sock call) — verify against the
 * full file. */
1398 int dpdk_vhost_user_create_if(vnet_main_t * vnm, vlib_main_t * vm,
1399 const char * sock_filename,
1403 u8 renumber, u32 custom_dev_instance,
1406 dpdk_main_t * dm = &dpdk_main;
1412 // using virtio vhost user?
1413 if (dm->conf->use_virtio_vhost) {
1414 return vhost_user_create_if(vnm, vm, sock_filename, is_server,
1415 sw_if_index, feature_mask, renumber, custom_dev_instance, hwaddr);
/* server mode: create the listening socket before the interface */
1419 if ((rv = dpdk_vhost_user_init_server_sock (sock_filename, &sockfd)) != 0) {
1425 // set next vhost-user if id if custom one is higher or equal
1426 if (custom_dev_instance >= dm->next_vu_if_id)
1427 dm->next_vu_if_id = custom_dev_instance + 1;
1429 dpdk_create_vhost_user_if_internal(&hw_if_idx, custom_dev_instance, hwaddr);
1431 dpdk_create_vhost_user_if_internal(&hw_if_idx, (u32)~0, hwaddr);
1432 DBG_SOCK("dpdk vhost-user interface created hw_if_index %d", hw_if_idx);
1434 xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_idx);
/* initialize per-interface vhost-user state and report sw_if_index back */
1437 dpdk_vhost_user_vui_init (vnm, xd, sockfd, sock_filename, is_server,
1438 feature_mask, sw_if_index);
1440 dpdk_vhost_user_vui_register (vm, xd);
/* API entry point: re-point an existing vhost-user interface at a new
 * socket/mode. The old session is disconnected, a new server socket is
 * created if requested, and the vui state is re-initialized. Optionally
 * renumbers the interface. */
1444 int dpdk_vhost_user_modify_if(vnet_main_t * vnm, vlib_main_t * vm,
1445 const char * sock_filename,
1449 u8 renumber, u32 custom_dev_instance)
1451 dpdk_main_t * dm = &dpdk_main;
1453 dpdk_vu_intf_t * vui = NULL;
1458 // using virtio vhost user?
1459 if (dm->conf->use_virtio_vhost) {
1460 return vhost_user_modify_if(vnm, vm, sock_filename, is_server,
1461 sw_if_index, feature_mask, renumber, custom_dev_instance);
1464 xd = dpdk_vhost_user_device_from_sw_if_index(sw_if_index);
1467 return VNET_API_ERROR_INVALID_SW_IF_INDEX;
1471 // interface is inactive
1473 // disconnect interface sockets
1474 dpdk_vhost_user_if_disconnect(xd);
1477 if ((rv = dpdk_vhost_user_init_server_sock (sock_filename, &sockfd)) != 0) {
1482 dpdk_vhost_user_vui_init (vnm, xd, sockfd, sock_filename, is_server,
1483 feature_mask, &sw_if_idx);
1486 vnet_interface_name_renumber (sw_if_idx, custom_dev_instance);
1489 dpdk_vhost_user_vui_register (vm, xd);
/* API entry point: delete (deactivate) a vhost-user interface. DPDK ports
 * cannot be destroyed at runtime, so the device is disconnected, parked on
 * the inactive list for reuse, and its ethernet interface removed. */
1494 int dpdk_vhost_user_delete_if(vnet_main_t * vnm, vlib_main_t * vm,
1497 dpdk_main_t * dm = &dpdk_main;
1498 dpdk_device_t * xd = NULL;
1499 dpdk_vu_intf_t * vui;
1502 // using virtio vhost user?
1503 if (dm->conf->use_virtio_vhost) {
1504 return vhost_user_delete_if(vnm, vm, sw_if_index);
1507 xd = dpdk_vhost_user_device_from_sw_if_index(sw_if_index);
1510 return VNET_API_ERROR_INVALID_SW_IF_INDEX;
1514 // interface is inactive
1516 // disconnect interface sockets
1517 dpdk_vhost_user_if_disconnect(xd);
1518 // add to inactive interface list
1519 vec_add1 (dm->vu_inactive_interfaces_device_index, xd->device_index);
1521 ethernet_delete_interface (vnm, xd->vlib_hw_if_index);
1522 DBG_SOCK ("deleted (deactivated) vhost-user interface sw_if_index %d", sw_if_index);
/* API entry point: build a vector of vhost_user_intf_details_t describing
 * every active vhost-user interface. Caller owns (and frees) *out_vuids. */
1527 int dpdk_vhost_user_dump_ifs(vnet_main_t * vnm, vlib_main_t * vm, vhost_user_intf_details_t **out_vuids)
1530 dpdk_main_t * dm = &dpdk_main;
1532 dpdk_vu_intf_t * vui;
1533 struct virtio_net * vhost_dev;
1534 vhost_user_intf_details_t * r_vuids = NULL;
1535 vhost_user_intf_details_t * vuid = NULL;
1536 u32 * hw_if_indices = 0;
1537 vnet_hw_interface_t * hi;
1544 // using virtio vhost user?
1545 if (dm->conf->use_virtio_vhost) {
1546 return vhost_user_dump_ifs(vnm, vm, out_vuids);
/* collect hw_if_index of every active vhost-user device */
1549 vec_foreach (xd, dm->devices) {
1550 if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER &&
1551 xd->vu_intf->active)
1552 vec_add1(hw_if_indices, xd->vlib_hw_if_index);
1555 for (i = 0; i < vec_len (hw_if_indices); i++) {
1556 hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
1557 xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_indices[i]);
1559 clib_warning("invalid vhost-user interface hw_if_index %d", hw_if_indices[i]);
1564 ASSERT(vui != NULL);
1565 vhost_dev = &xd->vu_vhost_dev;
/* header size only meaningful once at least one vring exists */
1566 u32 virtio_net_hdr_sz = (vui->num_vrings > 0 ?
1567 vhost_dev->virtqueue[0]->vhost_hlen : 0);
1569 vec_add2(r_vuids, vuid, 1);
1570 vuid->sw_if_index = xd->vlib_sw_if_index;
1571 vuid->virtio_net_hdr_sz = virtio_net_hdr_sz;
1572 vuid->features = vhost_dev->features;
1573 vuid->is_server = vui->sock_is_server;
1574 vuid->num_regions = (vhost_dev->mem != NULL ? vhost_dev->mem->nregions : 0);
1575 vuid->sock_errno = vui->sock_errno;
/* NOTE(review): strncpy does not NUL-terminate on truncation and vec_add2
 * memory is not guaranteed zeroed — confirm vuid strings are terminated. */
1576 strncpy((char *)vuid->sock_filename, (char *)vui->sock_filename,
1577 ARRAY_LEN(vuid->sock_filename)-1);
/* render the vec-string interface name with an explicit trailing NUL */
1579 s = format (s, "%v%c", hi->name, 0);
1581 strncpy((char *)vuid->if_name, (char *)s,
1582 ARRAY_LEN(vuid->if_name)-1);
1587 vec_free (hw_if_indices);
1589 *out_vuids = r_vuids;
1595 * Processing functions called from dpdk process fn
/* Per-process scratch state for the periodic vhost-user reconnect scan:
 * a spare client socket, its target address, and a unix_file template.
 * NOTE(review): the typedef opening and some members are elided from this
 * extraction. */
1599 struct sockaddr_un sun;
1601 unix_file_t template;
1603 } dpdk_vu_process_state;
/* Allocate and initialize the reconnect-scan state (see
 * dpdk_vu_process_state); a spare AF_UNIX socket is pre-created so
 * dpdk_vhost_user_process_if() always has one ready to connect().
 * NOTE(review): the socket() return value is not checked here — a failure
 * leaves state->sockfd negative; confirm downstream handling. */
1605 void dpdk_vhost_user_process_init (void **ctx)
1607 dpdk_vu_process_state *state = clib_mem_alloc (sizeof(dpdk_vu_process_state));
1608 memset(state, 0, sizeof(*state));
1609 state->sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
1610 state->sun.sun_family = AF_UNIX;
1611 state->template.read_function = dpdk_vhost_user_socket_read;
1612 state->template.error_function = dpdk_vhost_user_socket_error;
1613 state->event_data = 0;
/* Release the reconnect-scan state allocated by
 * dpdk_vhost_user_process_init(); body elided from this extraction. */
1617 void dpdk_vhost_user_process_cleanup (void *ctx)
/* Periodic per-interface maintenance, run from the dpdk process node.
 * Client-mode interfaces only: if disconnected, attempt to connect() using
 * the pre-created spare socket; if connected, probe liveness via
 * getsockopt(SO_ERROR) and disconnect on error. */
1622 uword dpdk_vhost_user_process_if (vlib_main_t *vm, dpdk_device_t *xd, void *ctx)
1624 dpdk_main_t * dm = &dpdk_main;
1625 dpdk_vu_process_state *state = (dpdk_vu_process_state *)ctx;
1626 dpdk_vu_intf_t *vui = xd->vu_intf;
/* server-mode interfaces accept(); nothing to do for inactive ones */
1628 if (vui->sock_is_server || !vui->active)
1631 if (vui->unix_fd == -1) {
1632 /* try to connect */
/* state->sun was zeroed at init, so the bounded copy stays terminated */
1633 strncpy(state->sun.sun_path, (char *) vui->sock_filename, sizeof(state->sun.sun_path) - 1);
1635 if (connect(state->sockfd, (struct sockaddr *) &(state->sun), sizeof(struct sockaddr_un)) == 0) {
1636 vui->sock_errno = 0;
1637 vui->unix_fd = state->sockfd;
1638 state->template.file_descriptor = state->sockfd;
1639 vui->unix_file_index = unix_file_add (&unix_main, &(state->template));
1640 hash_set (dm->vu_sw_if_index_by_sock_fd, state->sockfd, xd->vlib_sw_if_index);
/* the spare fd is now owned by the interface; create a new spare */
1642 state->sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
1643 if (state->sockfd < 0)
1646 vui->sock_errno = errno;
1649 /* check if socket is alive */
1651 socklen_t len = sizeof (error);
1652 int retval = getsockopt(vui->unix_fd, SOL_SOCKET, SO_ERROR, &error, &len);
1655 dpdk_vhost_user_if_disconnect(xd);
/* CLI handler for "create vhost-user": parse socket path, server flag,
 * feature mask, hwaddr and optional renumber instance, then create the
 * interface and print its name.
 * NOTE(review): the return value of dpdk_vhost_user_create_if() is ignored;
 * on failure sw_if_index may be stale when printed below — confirm. */
1664 static clib_error_t *
1665 dpdk_vhost_user_connect_command_fn (vlib_main_t * vm,
1666 unformat_input_t * input,
1667 vlib_cli_command_t * cmd)
1669 dpdk_main_t * dm = &dpdk_main;
1670 unformat_input_t _line_input, * line_input = &_line_input;
1671 u8 * sock_filename = NULL;
1674 u64 feature_mask = (u64)~0;
1676 u32 custom_dev_instance = ~0;
1680 if (dm->conf->use_virtio_vhost) {
1681 return vhost_user_connect_command_fn(vm, input, cmd);
1684 /* Get a line of input. */
1685 if (! unformat_user (input, unformat_line_input, line_input))
1688 while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
1689 if (unformat (line_input, "socket %s", &sock_filename))
1691 else if (unformat (line_input, "server"))
1693 else if (unformat (line_input, "feature-mask 0x%llx", &feature_mask))
1695 else if (unformat (line_input, "hwaddr %U", unformat_ethernet_address, hwaddr))
1697 else if (unformat (line_input, "renumber %d", &custom_dev_instance)) {
1701 return clib_error_return (0, "unknown input `%U'",
1702 format_unformat_error, input);
1704 unformat_free (line_input);
1706 vnet_main_t *vnm = vnet_get_main();
1707 if (sock_filename == NULL)
1708 return clib_error_return (0, "missing socket file");
1710 dpdk_vhost_user_create_if(vnm, vm, (char *)sock_filename,
1711 is_server, &sw_if_index, feature_mask,
1712 renumber, custom_dev_instance, hw);
1714 vec_free(sock_filename);
1715 vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main(), sw_if_index);
/* Register the "create vhost-user" CLI command. */
1719 VLIB_CLI_COMMAND (dpdk_vhost_user_connect_command, static) = {
1720 .path = "create vhost-user",
1721 .short_help = "create vhost-user socket <socket-filename> [server] [feature-mask <hex>] [renumber <dev_instance>]",
1722 .function = dpdk_vhost_user_connect_command_fn,
/* CLI handler for "delete vhost-user": parse sw_if_index and delete the
 * matching interface. */
1725 static clib_error_t *
1726 dpdk_vhost_user_delete_command_fn (vlib_main_t * vm,
1727 unformat_input_t * input,
1728 vlib_cli_command_t * cmd)
1730 dpdk_main_t * dm = &dpdk_main;
1731 clib_error_t * error = 0;
1732 unformat_input_t _line_input, * line_input = &_line_input;
1733 u32 sw_if_index = ~0;
1735 if (dm->conf->use_virtio_vhost) {
1736 return vhost_user_delete_command_fn(vm, input, cmd);
1739 /* Get a line of input. */
1740 if (! unformat_user (input, unformat_line_input, line_input))
1743 while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
1744 if (unformat (line_input, "sw_if_index %d", &sw_if_index))
1747 return clib_error_return (0, "unknown input `%U'",
1748 format_unformat_error, input);
1750 unformat_free (line_input);
1752 if (sw_if_index == ~0) {
/* NOTE(review): format string has no %U but format args are passed —
 * mismatched clib_error_return usage; the extra args are ignored. */
1753 error = clib_error_return (0, "invalid sw_if_index",
1754 format_unformat_error, input);
1758 vnet_main_t *vnm = vnet_get_main();
1760 dpdk_vhost_user_delete_if(vnm, vm, sw_if_index);
/* Register the "delete vhost-user" CLI command. */
1765 VLIB_CLI_COMMAND (dpdk_vhost_user_delete_command, static) = {
1766 .path = "delete vhost-user",
1767 .short_help = "delete vhost-user sw_if_index <nn>",
1768 .function = dpdk_vhost_user_delete_command_fn,
/* X-macro list of virtio feature bits displayed by "show vhost-user";
 * expanded with a per-entry _(f) definition in the show handler below. */
1771 #define foreach_dpdk_vhost_feature \
1772 _ (VIRTIO_NET_F_MRG_RXBUF) \
1773 _ (VIRTIO_NET_F_CTRL_VQ) \
1774 _ (VIRTIO_NET_F_CTRL_RX)
/* CLI handler for "show vhost-user": print per-interface socket state,
 * negotiated features, guest memory regions, vring state and (with
 * "descriptors"/"desc") the full descriptor tables. */
1776 static clib_error_t *
1777 show_dpdk_vhost_user_command_fn (vlib_main_t * vm,
1778 unformat_input_t * input,
1779 vlib_cli_command_t * cmd)
1781 clib_error_t * error = 0;
1782 dpdk_main_t * dm = &dpdk_main;
1783 vnet_main_t * vnm = vnet_get_main();
1785 dpdk_vu_intf_t * vui;
1786 struct virtio_net * vhost_dev;
1787 u32 hw_if_index, * hw_if_indices = 0;
1788 vnet_hw_interface_t * hi;
1791 struct virtio_memory * mem;
1792 struct feat_struct { u8 bit; char *str;};
1793 struct feat_struct *feat_entry;
/* bit/name table built from the foreach_dpdk_vhost_feature X-macro */
1795 static struct feat_struct feat_array[] = {
1796 #define _(f) { .str = #f, .bit = f, },
1797 foreach_dpdk_vhost_feature
1802 if (dm->conf->use_virtio_vhost) {
1803 return show_vhost_user_command_fn(vm, input, cmd);
/* parse optional interface names and the "descriptors" flag */
1806 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
1807 if (unformat (input, "%U", unformat_vnet_hw_interface, vnm, &hw_if_index)) {
1808 vec_add1 (hw_if_indices, hw_if_index);
1809 vlib_cli_output(vm, "add %d", hw_if_index);
1811 else if (unformat (input, "descriptors") || unformat (input, "desc") )
1814 error = clib_error_return (0, "unknown input `%U'",
1815 format_unformat_error, input);
/* no interfaces named: show all active vhost-user devices */
1819 if (vec_len (hw_if_indices) == 0) {
1820 vec_foreach (xd, dm->devices) {
1821 if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER && xd->vu_intf->active)
1822 vec_add1(hw_if_indices, xd->vlib_hw_if_index);
1826 vlib_cli_output (vm, "DPDK vhost-user interfaces");
1827 vlib_cli_output (vm, "Global:\n coalesce frames %d time %e\n\n",
1828 dm->conf->vhost_coalesce_frames, dm->conf->vhost_coalesce_time);
1830 for (i = 0; i < vec_len (hw_if_indices); i++) {
1831 hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
1833 if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_indices[i]))) {
1834 error = clib_error_return (0, "not dpdk vhost-user interface: '%s'",
1839 vhost_dev = &xd->vu_vhost_dev;
1840 mem = vhost_dev->mem;
/* NOTE(review): `vui` is used below but its assignment (presumably
 * vui = xd->vu_intf) is elided from this extraction — confirm. */
1841 u32 virtio_net_hdr_sz = (vui->num_vrings > 0 ?
1842 vhost_dev->virtqueue[0]->vhost_hlen : 0);
1844 vlib_cli_output (vm, "Interface: %v (ifindex %d)",
1845 hi->name, hw_if_indices[i]);
1847 vlib_cli_output (vm, "virtio_net_hdr_sz %d\n features (0x%llx): \n",
1848 virtio_net_hdr_sz, xd->vu_vhost_dev.features);
1850 feat_entry = (struct feat_struct *) &feat_array;
1851 while(feat_entry->str) {
/* NOTE(review): `1 <<` is an int shift — would misbehave for feature
 * bits >= 31; current table entries are all < 31. Prefer 1ULL <<. */
1852 if (xd->vu_vhost_dev.features & (1 << feat_entry->bit))
1853 vlib_cli_output (vm, " %s (%d)", feat_entry->str, feat_entry->bit);
1857 vlib_cli_output (vm, "\n");
1859 vlib_cli_output (vm, " socket filename %s type %s errno \"%s\"\n\n",
1860 vui->sock_filename, vui->sock_is_server ? "server" : "client",
1861 strerror(vui->sock_errno));
1863 vlib_cli_output (vm, " Memory regions (total %d)\n", mem->nregions);
1866 vlib_cli_output(vm, " region fd guest_phys_addr memory_size userspace_addr mmap_offset mmap_addr\n");
1867 vlib_cli_output(vm, " ====== ===== ================== ================== ================== ================== ==================\n");
1869 for (j = 0; j < mem->nregions; j++) {
1870 vlib_cli_output(vm, " %d %-5d 0x%016lx 0x%016lx 0x%016lx 0x%016lx 0x%016lx\n", j,
1872 mem->regions[j].guest_phys_address,
1873 mem->regions[j].memory_size,
1874 mem->regions[j].userspace_address,
1875 mem->regions[j].address_offset,
1876 vui->region_addr[j]);
/* per-vring state: even indices are RX, odd are TX */
1878 for (q = 0; q < vui->num_vrings; q++) {
1879 struct vhost_virtqueue *vq = vhost_dev->virtqueue[q];
1880 const char *qtype = (q & 1) ? "TX" : "RX";
1882 vlib_cli_output(vm, "\n Virtqueue %d (%s)\n", q/2, qtype);
1884 vlib_cli_output(vm, " qsz %d last_used_idx %d last_used_idx_res %d\n",
1885 vq->size, vq->last_used_idx, vq->last_used_idx_res);
1887 if (vq->avail && vq->used)
1888 vlib_cli_output(vm, " avail.flags %x avail.idx %d used.flags %x used.idx %d\n",
1889 vq->avail->flags, vq->avail->idx, vq->used->flags, vq->used->idx);
1891 vlib_cli_output(vm, " kickfd %d callfd %d errfd %d enabled %d\n",
1892 vq->kickfd, vq->callfd, vui->vrings[q].errfd, vq->enabled);
1894 if (show_descr && vq->enabled) {
1895 vlib_cli_output(vm, "\n descriptor table:\n");
1896 vlib_cli_output(vm, " id addr len flags next user_addr\n");
1897 vlib_cli_output(vm, " ===== ================== ===== ====== ===== ==================\n");
1898 for(j = 0; j < vq->size; j++) {
1899 vlib_cli_output(vm, " %-5d 0x%016lx %-5d 0x%04x %-5d 0x%016lx\n",
1905 pointer_to_uword(map_guest_mem(xd, vq->desc[j].addr)));}
1908 vlib_cli_output (vm, "\n");
1911 vec_free (hw_if_indices);
1915 VLIB_CLI_COMMAND (show_vhost_user_command, static) = {
1916 .path = "show vhost-user",
1917 .short_help = "show vhost-user interface",
1918 .function = show_dpdk_vhost_user_command_fn,