*/
-static uint64_t
-qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
+static uword
+qva_to_vva(struct virtio_net *dev, uword qemu_va)
{
struct virtio_memory_regions *region;
- uint64_t vhost_va = 0;
+ uword vhost_va = 0;
uint32_t regionidx = 0;
/* Find the region where the address lives. */
xd->vu_is_running = 0;
}
-static inline void * map_guest_mem(dpdk_device_t * xd, u64 addr)
+static inline void * map_guest_mem(dpdk_device_t * xd, uword addr)
{
dpdk_vu_intf_t * vui = xd->vu_intf;
struct virtio_memory * mem = xd->vu_vhost_dev.mem;
for (i=0; i<mem->nregions; i++) {
if ((mem->regions[i].guest_phys_address <= addr) &&
((mem->regions[i].guest_phys_address + mem->regions[i].memory_size) > addr)) {
- return (void *) (vui->region_addr[i] + addr - mem->regions[i].guest_phys_address);
+ return (void *) ((uword)vui->region_addr[i] + addr - (uword)mem->regions[i].guest_phys_address);
}
}
DBG_SOCK("failed to map guest mem addr %lx", addr);
* Generate random MAC address for the interface
*/
if (hwaddr) {
- memcpy(addr, hwaddr, sizeof(addr));
+ clib_memcpy(addr, hwaddr, sizeof(addr));
} else {
f64 now = vlib_time_now(vm);
u32 rnd;
rnd = (u32) (now * 1e6);
rnd = random_u32 (&rnd);
- memcpy (addr+2, &rnd, sizeof(rnd));
+ clib_memcpy (addr+2, &rnd, sizeof(rnd));
addr[0] = 2;
addr[1] = 0xfe;
}
return 0;
}
+#if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
+#include <sys/statfs.h>
+
+static long get_huge_page_size(int fd)
+{
+  struct statfs s;
+  if (fstatfs(fd, &s) < 0)
+    return -1;
+  return s.f_bsize;
+}
+#endif
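+/*
+ * Note: vhost-user log and memory-region fds are backed by hugetlbfs,
+ * and for hugetlbfs files fstatfs() reports the hugepage size (e.g.
+ * 2 MB or 1 GB) in f_bsize, so the helper above returns the actual
+ * backing page size rather than a hard-coded 2 MB. Illustrative use
+ * (hypothetical fd and len):
+ *
+ *   long psz = get_huge_page_size(fd);   // e.g. 0x200000 for 2 MB pages
+ *   len = (len + psz - 1) & ~(psz - 1);  // round up to a page multiple
+ */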
+
#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
static clib_error_t *
dpdk_vhost_user_set_protocol_features(u32 hw_if_index, u64 prot_features)
{
*features = rte_vhost_feature_get();
+#if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
+#define OFFLOAD_FEATURES ((1ULL << VIRTIO_NET_F_HOST_TSO4) | \
+ (1ULL << VIRTIO_NET_F_HOST_TSO6) | \
+ (1ULL << VIRTIO_NET_F_CSUM) | \
+ (1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
+ (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6))
+
+  /* These are not supported, as bridging/tunneling VHOST
+   * interfaces with hardware interfaces/drivers that do
+   * not support offloading breaks L4 traffic.
+   */
+ *features &= (~OFFLOAD_FEATURES);
+#endif
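+  /* Note on the effect: with the CSUM/TSO bits masked out, the guest
+   * virtio driver computes full checksums and segments TCP itself, so
+   * frames bridged to ports without offload support stay valid on the
+   * wire.
+   */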
+
DBG_SOCK("supported features: 0x%lx", *features);
return 0;
}
}
static clib_error_t *
-dpdk_vhost_user_set_vring_addr(u32 hw_if_index, u8 idx, u64 desc, u64 used, u64 avail)
+dpdk_vhost_user_set_vring_addr(u32 hw_if_index, u8 idx, uword desc,
+                               uword used, uword avail, uword log)
{
dpdk_device_t * xd;
struct vhost_virtqueue *vq;
- DBG_SOCK("idx %u desc 0x%lx used 0x%lx avail 0x%lx",
- idx, desc, used, avail);
+ DBG_SOCK("idx %u desc 0x%lx used 0x%lx avail 0x%lx log 0x%lx",
+ idx, desc, used, avail, log);
if (!(xd = dpdk_vhost_user_device_from_hw_if_index(hw_if_index))) {
clib_warning("not a vhost-user interface");
vq->desc = (struct vring_desc *) qva_to_vva(&xd->vu_vhost_dev, desc);
vq->used = (struct vring_used *) qva_to_vva(&xd->vu_vhost_dev, used);
vq->avail = (struct vring_avail *) qva_to_vva(&xd->vu_vhost_dev, avail);
+#if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
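+  /* log_guest_addr is the guest physical address of this vring's used
+   * ring; dirty-page logging uses it to mark used-ring updates during
+   * live migration.
+   */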
+ vq->log_guest_addr = log;
+#endif
if (!(vq->desc && vq->used && vq->avail)) {
clib_warning("falied to set vring addr");
}
+  /*
+   * Inform the guest that there is no need to inform (kick) the
+   * host when it adds buffers: a kick causes a vmexit and thus
+   * incurs a performance penalty.
+   *
+   * The function below sets the VRING_USED_F_NO_NOTIFY flag in the
+   * used ring, so it must be called after vq->used is initialized.
+   */
+ rte_vhost_enable_guest_notification(&xd->vu_vhost_dev, idx, 0);
stop_processing_packets(hw_if_index, idx);
return 0;
vq->desc = NULL;
vq->used = NULL;
vq->avail = NULL;
+#if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
+ vq->log_guest_addr = 0;
+#endif
/* Check if all Qs are disabled */
int numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
(cmsg->cmsg_type == SCM_RIGHTS) &&
(cmsg->cmsg_len - CMSG_LEN(0) <= VHOST_MEMORY_MAX_NREGIONS * sizeof(int))) {
number_of_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
- memcpy(fds, CMSG_DATA(cmsg), number_of_fds * sizeof(int));
+ clib_memcpy(fds, CMSG_DATA(cmsg), number_of_fds * sizeof(int));
}
/* version 1, no reply bit set */
dpdk_vhost_user_set_vring_addr(xd->vlib_hw_if_index, msg.state.index,
msg.addr.desc_user_addr,
msg.addr.used_user_addr,
- msg.addr.avail_user_addr);
+ msg.addr.avail_user_addr,
+ msg.addr.log_guest_addr);
break;
case VHOST_USER_SET_OWNER:
break;
case VHOST_USER_SET_LOG_BASE:
+#if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
DBG_SOCK("if %d msg VHOST_USER_SET_LOG_BASE",
xd->vlib_hw_if_index);
+
+ if (msg.size != sizeof(msg.log)) {
+ DBG_SOCK("invalid msg size for VHOST_USER_SET_LOG_BASE: %u instead of %lu",
+ msg.size, sizeof(msg.log));
+ goto close_socket;
+ }
+
+ if (!(xd->vu_vhost_dev.protocol_features & (1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD))) {
+ DBG_SOCK("VHOST_USER_PROTOCOL_F_LOG_SHMFD not set but VHOST_USER_SET_LOG_BASE received");
+ goto close_socket;
+ }
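+      /* VHOST_USER_PROTOCOL_F_LOG_SHMFD means the master passes the log
+       * region as a shared-memory fd in the message's ancillary data
+       * (fds[0] below), with msg.log carrying its size and offset.
+       */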
+
+ fd = fds[0];
+      /* round the mapping size up to the hugepage size backing the fd */
+      long page_sz = get_huge_page_size(fd);
+      if (page_sz <= 0) {
+        clib_warning("failed to get huge page size for fd %d", fd);
+        goto close_socket;
+      }
+      ssize_t map_sz = (msg.log.size + msg.log.offset + page_sz - 1) & ~(page_sz - 1);
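+      /* Worked example, assuming 2 MB hugepages: size + offset of
+       * 0x500000 (5 MB) rounds up to map_sz = 0x600000 (6 MB), i.e.
+       * three whole pages, since hugetlbfs mappings generally require
+       * whole-page lengths.
+       */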
+
+ void *addr = mmap(0, map_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+
+ DBG_SOCK("map log region addr 0 len 0x%lx off 0x%lx fd %d mapped %p",
+ map_sz, msg.log.offset, fd, addr);
+
+ if (addr == MAP_FAILED) {
+ clib_warning("failed to map memory. errno is %d", errno);
+ goto close_socket;
+ }
+
+      xd->vu_vhost_dev.log_base = pointer_to_uword(addr) + msg.log.offset;
+ xd->vu_vhost_dev.log_size = msg.log.size;
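+      /* From here on the vhost library treats log_base as the host VA of
+       * a dirty-page bitmap, one bit per VHOST_LOG_PAGE (4 KB) of guest
+       * memory; bits are set on every backend write so QEMU can re-copy
+       * the touched pages during live migration.
+       */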
+ msg.flags |= VHOST_USER_REPLY_MASK;
+ msg.size = sizeof(msg.u64);
+#else
+ DBG_SOCK("if %d msg VHOST_USER_SET_LOG_BASE Not-Implemented",
+ xd->vlib_hw_if_index);
+#endif
break;
case VHOST_USER_SET_LOG_FD:
renumber, custom_dev_instance, hw);
vec_free(sock_filename);
+ vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main(), sw_if_index);
return 0;
}
vq->desc[j].len,
vq->desc[j].flags,
vq->desc[j].next,
- (u64) map_guest_mem(xd, vq->desc[j].addr));}
+ pointer_to_uword(map_guest_mem(xd, vq->desc[j].addr)));}
}
}
vlib_cli_output (vm, "\n");