From 4448957b1864bb1e640fec63cc6f61b1fb7e5b55 Mon Sep 17 00:00:00 2001
From: Steve Shin
Date: Thu, 22 Sep 2016 12:08:55 -0700
Subject: [PATCH] Fix eventfd leakage issue in vhost-user mode

The leak is in the file descriptors for the kickfd and the vring's
callfd. Those file descriptors should be closed when the vhost-user
interface is disconnected.

Change-Id: I12453b0c3eac037a1dc040a001465059b8f672c2
Signed-off-by: Steve Shin
---
 vnet/vnet/devices/dpdk/vhost_user.c   | 40 ++++++++++++++++++++++++++++++++---
 vnet/vnet/devices/virtio/vhost-user.c | 23 +++++++++++++++++---
 2 files changed, 57 insertions(+), 6 deletions(-)

diff --git a/vnet/vnet/devices/dpdk/vhost_user.c b/vnet/vnet/devices/dpdk/vhost_user.c
index 946c6e1f1db..9e53c96f599 100644
--- a/vnet/vnet/devices/dpdk/vhost_user.c
+++ b/vnet/vnet/devices/dpdk/vhost_user.c
@@ -660,6 +660,15 @@ dpdk_vhost_user_get_vring_base (u32 hw_if_index, u8 idx, u32 * num)
    */
   DBG_SOCK ("Stopping vring Q %u of device %d", idx, hw_if_index);
   dpdk_vu_intf_t *vui = xd->vu_intf;
+
+  /* if there is old fd, delete it */
+  if (vui->vrings[idx].callfd > 0)
+    {
+      unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
+                                           vui->vrings[idx].callfd_idx);
+      unix_file_del (&unix_main, uf);
+    }
+
   vui->vrings[idx].enabled = 0;  /* Reset local copy */
   vui->vrings[idx].callfd = -1;  /* Reset FD */
   vq->enabled = 0;
@@ -833,7 +842,7 @@ dpdk_vhost_user_set_vring_call (u32 hw_if_index, u8 idx, int fd)
   dpdk_vu_intf_t *vui = xd->vu_intf;
 
   /* if there is old fd, delete it */
-  if (vui->vrings[idx].callfd > 0)
+  if (vui->vrings[idx].callfd > -1)
     {
       unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
                                            vui->vrings[idx].callfd_idx);
@@ -863,7 +872,7 @@ dpdk_vhost_user_want_interrupt (dpdk_device_t * xd, int idx)
   struct vhost_virtqueue *vq = xd->vu_vhost_dev.virtqueue[idx];
 
   /* return if vm is interested in interrupts */
-  return (vring->callfd > 0)
+  return (vring->callfd > -1)
     && !(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
 }
 
@@ -881,7 +890,8 @@ dpdk_vhost_user_send_interrupt (vlib_main_t * vm, dpdk_device_t * xd, int idx)
   struct vhost_virtqueue *vq = xd->vu_vhost_dev.virtqueue[idx];
 
   /* if vm is interested in interrupts */
-  if ((vring->callfd > 0) && !(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
+  if ((vring->callfd > -1)
+      && !(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
     {
       eventfd_write (vring->callfd, (eventfd_t) 1);
       vring->n_since_last_int = 0;
@@ -901,6 +911,7 @@ dpdk_vhost_user_vui_init (vnet_main_t * vnm,
                           const char *sock_filename, u8 is_server,
                           u64 feature_mask, u32 * sw_if_index)
 {
+  int q;
   dpdk_vu_intf_t *vui = xd->vu_intf;
   memset (vui, 0, sizeof (*vui));
 
@@ -916,6 +927,13 @@ dpdk_vhost_user_vui_init (vnet_main_t * vnm,
   vui->active = 1;
   vui->unix_file_index = ~0;
 
+  for (q = 0; q < vui->num_vrings; q++)
+    {
+      vui->vrings[q].enabled = 0;
+      vui->vrings[q].callfd = -1;
+      vui->vrings[q].kickfd = -1;
+    }
+
   vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);
 
   if (sw_if_index)
@@ -997,6 +1015,19 @@ dpdk_vhost_user_if_disconnect (dpdk_device_t * xd)
   for (q = 0; q < vui->num_vrings; q++)
     {
       vq = xd->vu_vhost_dev.virtqueue[q];
+      if (vui->vrings[q].callfd > -1)
+        {
+          unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
+                                               vui->vrings[q].callfd_idx);
+          unix_file_del (&unix_main, uf);
+        }
+
+      if (vui->vrings[q].kickfd > -1)
+        {
+          close (vui->vrings[q].kickfd);
+          vui->vrings[q].kickfd = -1;
+        }
+
       vui->vrings[q].enabled = 0;  /* Reset local copy */
       vui->vrings[q].callfd = -1;  /* Reset FD */
       vq->enabled = 0;
@@ -1200,6 +1231,9 @@ dpdk_vhost_user_socket_read (unix_file_t * uf)
           if (number_of_fds != 1)
             goto close_socket;
 
+          if (vui->vrings[q].kickfd > -1)
+            close (vui->vrings[q].kickfd);
+
           vui->vrings[q].kickfd = fds[0];
         }
       else

diff --git a/vnet/vnet/devices/virtio/vhost-user.c b/vnet/vnet/devices/virtio/vhost-user.c
index 6182ffa22cc..0f94e131256 100644
--- a/vnet/vnet/devices/virtio/vhost-user.c
+++ b/vnet/vnet/devices/virtio/vhost-user.c
@@ -301,6 +301,18 @@ vhost_user_if_disconnect (vhost_user_intf_t * vui)
   vui->is_up = 0;
   for (q = 0; q < vui->num_vrings; q++)
     {
+      if (vui->vrings[q].callfd > -1)
+        {
+          unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
+                                               vui->vrings[q].callfd_idx);
+          unix_file_del (&unix_main, uf);
+        }
+
+      if (vui->vrings[q].kickfd > -1)
+        close (vui->vrings[q].kickfd);
+
+      vui->vrings[q].callfd = -1;
+      vui->vrings[q].kickfd = -1;
       vui->vrings[q].desc = NULL;
       vui->vrings[q].avail = NULL;
       vui->vrings[q].used = NULL;
@@ -593,7 +605,7 @@ vhost_user_socket_read (unix_file_t * uf)
             goto close_socket;
 
           /* if there is old fd, delete it */
-          if (vui->vrings[q].callfd)
+          if (vui->vrings[q].callfd > -1)
             {
               unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
                                                    vui->vrings[q].callfd_idx);
@@ -619,6 +631,9 @@ vhost_user_socket_read (unix_file_t * uf)
           if (number_of_fds != 1)
             goto close_socket;
 
+          if (vui->vrings[q].kickfd > -1)
+            close (vui->vrings[q].kickfd);
+
           vui->vrings[q].kickfd = fds[0];
         }
       else
@@ -1283,7 +1298,7 @@ vhost_user_if_input (vlib_main_t * vm,
     }
 
   /* interrupt (call) handling */
-  if ((txvq->callfd > 0) && !(txvq->avail->flags & 1))
+  if ((txvq->callfd > -1) && !(txvq->avail->flags & 1))
     {
       txvq->n_since_last_int += n_rx_packets;
 
@@ -1615,7 +1630,7 @@ done:
   vhost_user_log_dirty_ring (vui, rxvq, idx);
 
   /* interrupt (call) handling */
-  if ((rxvq->callfd > 0) && !(rxvq->avail->flags & 1))
+  if ((rxvq->callfd > -1) && !(rxvq->avail->flags & 1))
     {
       rxvq->n_since_last_int += n_packets - n_left;
 
@@ -1939,6 +1954,8 @@ vhost_user_vui_init (vnet_main_t * vnm,
   for (q = 0; q < 2; q++)
     {
       vui->vrings[q].enabled = 0;
+      vui->vrings[q].callfd = -1;
+      vui->vrings[q].kickfd = -1;
     }
 
   vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
-- 
2.16.6
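
The descriptor-lifecycle pattern this patch enforces is worth seeing in
isolation: every vring fd slot starts at the -1 sentinel (0 is a valid file
descriptor, so a "fd > 0" emptiness test can mishandle fd 0), any previously
installed descriptor is closed before a new one is stored, and disconnect
releases everything. Below is a minimal standalone sketch of that lifecycle,
not VPP code: the names NUM_VRINGS, vring_state_t, vring_state_init,
vring_set_kickfd and vring_state_disconnect are hypothetical, and only
close(), eventfd() and the -1 sentinel correspond directly to the patch. In
VPP itself the callfd is registered with the event loop, so the patch tears
it down through unix_file_del() rather than a bare close().

#include <unistd.h>
#include <sys/eventfd.h>

#define NUM_VRINGS 2		/* hypothetical; VPP sizes this per interface */

typedef struct
{
  int kickfd;			/* guest -> backend notification eventfd */
  int callfd;			/* backend -> guest interrupt eventfd */
} vring_state_t;

/* Mark every slot empty with -1; 0 is a valid descriptor, so a
 * "fd > 0" emptiness test would mishandle fd 0. */
static void
vring_state_init (vring_state_t * vrings)
{
  int q;
  for (q = 0; q < NUM_VRINGS; q++)
    {
      vrings[q].kickfd = -1;
      vrings[q].callfd = -1;
    }
}

/* Each SET_VRING_KICK message delivers a fresh eventfd; close the
 * previous one first or it leaks on every reconnect. */
static void
vring_set_kickfd (vring_state_t * vring, int new_fd)
{
  if (vring->kickfd > -1)
    close (vring->kickfd);
  vring->kickfd = new_fd;
}

/* On disconnect, release both descriptors and restore the sentinel. */
static void
vring_state_disconnect (vring_state_t * vrings)
{
  int q;
  for (q = 0; q < NUM_VRINGS; q++)
    {
      if (vrings[q].kickfd > -1)
	close (vrings[q].kickfd);
      if (vrings[q].callfd > -1)
	close (vrings[q].callfd);
      vrings[q].kickfd = -1;
      vrings[q].callfd = -1;
    }
}

int
main (void)
{
  vring_state_t vrings[NUM_VRINGS];
  vring_state_init (vrings);

  /* Two SET_VRING_KICK messages for queue 0: without the close () in
   * vring_set_kickfd the first eventfd would stay open forever. */
  vring_set_kickfd (&vrings[0], eventfd (0, 0));
  vring_set_kickfd (&vrings[0], eventfd (0, 0));

  vring_state_disconnect (vrings);
  return 0;
}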
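
For context on where fds[0] in the socket-read hunks comes from: vhost-user
messages such as VHOST_USER_SET_VRING_KICK and VHOST_USER_SET_VRING_CALL
carry the eventfd as SCM_RIGHTS ancillary data on the unix socket, so every
such message hands the receiver a brand-new kernel descriptor that it owns
and must eventually close. A hedged sketch of that receive path, with
recv_fd_over_unix_socket() as a hypothetical helper (the real handler reads
the full vhost-user message header and payload as well):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Receive one byte of payload plus an optional SCM_RIGHTS descriptor.
 * Returns the received fd, or -1 if none was attached. */
static int
recv_fd_over_unix_socket (int sock)
{
  char data;
  struct iovec iov = {.iov_base = &data,.iov_len = 1 };
  union
  {
    char buf[CMSG_SPACE (sizeof (int))];
    struct cmsghdr align;	/* forces correct alignment for cmsg data */
  } ctl;
  struct msghdr msg = { 0 };
  struct cmsghdr *cmsg;
  int fd = -1;

  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = ctl.buf;
  msg.msg_controllen = sizeof (ctl.buf);

  if (recvmsg (sock, &msg, 0) < 1)
    return -1;

  for (cmsg = CMSG_FIRSTHDR (&msg); cmsg; cmsg = CMSG_NXTHDR (&msg, cmsg))
    if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS)
      memcpy (&fd, CMSG_DATA (cmsg), sizeof (int));

  return fd;
}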