*/
DBG_SOCK ("Stopping vring Q %u of device %d", idx, hw_if_index);
dpdk_vu_intf_t *vui = xd->vu_intf;
+
+ /* if there is old fd, delete it */
+ if (vui->vrings[idx].callfd > -1)
+ {
+ unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
+ vui->vrings[idx].callfd_idx);
+ unix_file_del (&unix_main, uf);
+ }
+
vui->vrings[idx].enabled = 0; /* Reset local copy */
vui->vrings[idx].callfd = -1; /* Reset FD */
vq->enabled = 0;
dpdk_vu_intf_t *vui = xd->vu_intf;
/* if there is old fd, delete it */
- if (vui->vrings[idx].callfd > 0)
+ if (vui->vrings[idx].callfd > -1)
{
unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
vui->vrings[idx].callfd_idx);
struct vhost_virtqueue *vq = xd->vu_vhost_dev.virtqueue[idx];
/* return if vm is interested in interrupts */
- return (vring->callfd > 0)
+ return (vring->callfd > -1)
&& !(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
}
struct vhost_virtqueue *vq = xd->vu_vhost_dev.virtqueue[idx];
/* if vm is interested in interrupts */
- if ((vring->callfd > 0) && !(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
+ if ((vring->callfd > -1)
+ && !(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
{
eventfd_write (vring->callfd, (eventfd_t) 1);
vring->n_since_last_int = 0;
const char *sock_filename,
u8 is_server, u64 feature_mask, u32 * sw_if_index)
{
+ int q;
dpdk_vu_intf_t *vui = xd->vu_intf;
memset (vui, 0, sizeof (*vui));
vui->active = 1;
vui->unix_file_index = ~0;
+ for (q = 0; q < vui->num_vrings; q++)
+ {
+ vui->vrings[q].enabled = 0;
+ vui->vrings[q].callfd = -1;
+ vui->vrings[q].kickfd = -1;
+ }
+
vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);
if (sw_if_index)
for (q = 0; q < vui->num_vrings; q++)
{
vq = xd->vu_vhost_dev.virtqueue[q];
+ if (vui->vrings[q].callfd > -1)
+ {
+ unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
+ vui->vrings[q].callfd_idx);
+ unix_file_del (&unix_main, uf);
+ }
+
+ if (vui->vrings[q].kickfd > -1)
+ {
+ close (vui->vrings[q].kickfd);
+ vui->vrings[q].kickfd = -1;
+ }
+
vui->vrings[q].enabled = 0; /* Reset local copy */
vui->vrings[q].callfd = -1; /* Reset FD */
vq->enabled = 0;
if (number_of_fds != 1)
goto close_socket;
+ if (vui->vrings[q].kickfd > -1)
+ close (vui->vrings[q].kickfd);
+
vui->vrings[q].kickfd = fds[0];
}
else
vui->is_up = 0;
for (q = 0; q < vui->num_vrings; q++)
{
+ if (vui->vrings[q].callfd > -1)
+ {
+ unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
+ vui->vrings[q].callfd_idx);
+ unix_file_del (&unix_main, uf);
+ }
+
+ if (vui->vrings[q].kickfd > -1)
+ close (vui->vrings[q].kickfd);
+
+ vui->vrings[q].callfd = -1;
+ vui->vrings[q].kickfd = -1;
vui->vrings[q].desc = NULL;
vui->vrings[q].avail = NULL;
vui->vrings[q].used = NULL;
goto close_socket;
/* if there is old fd, delete it */
- if (vui->vrings[q].callfd)
+ if (vui->vrings[q].callfd > -1)
{
unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
vui->vrings[q].callfd_idx);
if (number_of_fds != 1)
goto close_socket;
+ if (vui->vrings[q].kickfd > -1)
+ close (vui->vrings[q].kickfd);
+
vui->vrings[q].kickfd = fds[0];
}
else
}
/* interrupt (call) handling */
- if ((txvq->callfd > 0) && !(txvq->avail->flags & 1))
+ if ((txvq->callfd > -1) && !(txvq->avail->flags & 1))
{
txvq->n_since_last_int += n_rx_packets;
vhost_user_log_dirty_ring (vui, rxvq, idx);
/* interrupt (call) handling */
- if ((rxvq->callfd > 0) && !(rxvq->avail->flags & 1))
+ if ((rxvq->callfd > -1) && !(rxvq->avail->flags & 1))
{
rxvq->n_since_last_int += n_packets - n_left;
for (q = 0; q < 2; q++)
{
vui->vrings[q].enabled = 0;
+ vui->vrings[q].callfd = -1;
+ vui->vrings[q].kickfd = -1;
}
vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);