#ifndef VIRTIO_F_RING_PACKED
#define VIRTIO_F_RING_PACKED 34
-#define VRING_DESC_F_NEXT 1
-#define VRING_DESC_F_WRITE 2
-#define VRING_DESC_F_INDIRECT 4
-
-#define VRING_DESC_F_AVAIL (1ULL << 7)
-#define VRING_DESC_F_USED (1ULL << 15)
-
struct vring_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};
-#define VRING_EVENT_F_ENABLE 0x0
-#define VRING_EVENT_F_DISABLE 0x1
-#define VRING_EVENT_F_DESC 0x2
-
struct vring_packed_desc_event {
uint16_t off_wrap;
uint16_t flags;
};
#endif
+/*
+ * Declare the packed ring defines below unconditionally,
+ * as the kernel header might use different names for them.
+ */
+#define VRING_DESC_F_AVAIL (1ULL << 7)
+#define VRING_DESC_F_USED (1ULL << 15)
+
+#define VRING_EVENT_F_ENABLE 0x0
+#define VRING_EVENT_F_DISABLE 0x1
+#define VRING_EVENT_F_DESC 0x2
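/*
 * Sketch (not part of this patch): how a device-side implementation
 * might interpret the driver's event suppression area defined above.
 * "driver_event" is a hypothetical pointer into the guest-shared
 * struct vring_packed_desc_event.
 */
static inline bool
need_kick_sketch(struct vring_packed_desc_event *driver_event)
{
	uint16_t flags = driver_event->flags;

	if (flags == VRING_EVENT_F_DISABLE)
		return false;	/* driver suppressed all notifications */
	if (flags == VRING_EVENT_F_ENABLE)
		return true;	/* driver wants every notification */

	/*
	 * VRING_EVENT_F_DESC: notify only once the descriptor whose
	 * offset and wrap bit are encoded in off_wrap has been used;
	 * that comparison is omitted in this sketch.
	 */
	return true;
}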
+
/*
 * Available and used descriptors are in the same order.
 */
static inline bool
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
- return wrap_counter == !!(desc->flags & VRING_DESC_F_AVAIL) &&
- wrap_counter != !!(desc->flags & VRING_DESC_F_USED);
+ uint16_t flags = *((volatile uint16_t *) &desc->flags);
+
+ return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
+ wrap_counter != !!(flags & VRING_DESC_F_USED);
}
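/*
 * Sketch (assumption, not from this patch): a minimal consumption
 * loop built on desc_is_avail(). "ring", "size", "idx" and the
 * wrap counter are hypothetical caller-managed state.
 */
static inline uint16_t
drain_packed_ring_sketch(struct vring_packed_desc *ring, uint16_t size,
		uint16_t idx, bool *wrap_counter)
{
	while (desc_is_avail(&ring[idx], *wrap_counter)) {
		/* ... process ring[idx].addr / ring[idx].len here ... */

		if (++idx >= size) {
			idx = 0;
			/* The wrap counter flips each time we pass the end. */
			*wrap_counter = !*wrap_counter;
		}
	}

	return idx;
}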
#define VHOST_LOG_PAGE 4096
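/*
 * Sketch (not from this patch): VHOST_LOG_PAGE is the dirty logging
 * granularity, so an address maps to one bit of the log as below.
 * "log_base" is a hypothetical pointer to the dirty log bitmap.
 */
static inline void
log_dirty_page_sketch(uint8_t *log_base, uint64_t addr)
{
	uint64_t page = addr / VHOST_LOG_PAGE;

	/* One bit per page: byte page / 8, bit page % 8. */
	log_base[page / 8] |= 1 << (page % 8);
}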
if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
uint16_t old = vq->signalled_used;
uint16_t new = vq->last_used_idx;
+ bool signalled_used_valid = vq->signalled_used_valid;
+
+ vq->signalled_used = new;
+ vq->signalled_used_valid = true;
VHOST_LOG_DEBUG(VHOST_DATA, "%s: used_event_idx=%d, old=%d, new=%d\n",
__func__,
vhost_used_event(vq),
old, new);
- if (vhost_need_event(vhost_used_event(vq), new, old)
- && (vq->callfd >= 0)) {
- vq->signalled_used = vq->last_used_idx;
+
+ if ((vhost_need_event(vhost_used_event(vq), new, old) &&
+ (vq->callfd >= 0)) ||
+ unlikely(!signalled_used_valid))
eventfd_write(vq->callfd, (eventfd_t) 1);
- }
} else {
/* Kick the guest if necessary. */
		if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
				&& (vq->callfd >= 0))
eventfd_write(vq->callfd, (eventfd_t)1);
}
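/*
 * For reference: the helper used above mirrors vring_need_event()
 * from the virtio specification. Using wrap-safe 16-bit arithmetic,
 * it returns true only if the guest's event index lies between the
 * last signalled used index (old) and the new one.
 */
static __rte_always_inline int
vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}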
+static __rte_always_inline void
+restore_mbuf(struct rte_mbuf *m)
+{
+ uint32_t mbuf_size, priv_size;
+
+ while (m) {
+ priv_size = rte_pktmbuf_priv_size(m->pool);
+ mbuf_size = sizeof(struct rte_mbuf) + priv_size;
+ /* start of buffer is after mbuf structure and priv data */
+
+ m->buf_addr = (char *)m + mbuf_size;
+ m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
+ m = m->next;
+ }
+}
+
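+/*
+ * An outstanding zero-copy mbuf chain is consumed once no extra
+ * reference to any of its segments remains (each refcnt is 1).
+ */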
+static __rte_always_inline bool
+mbuf_is_consumed(struct rte_mbuf *m)
+{
+ while (m) {
+ if (rte_mbuf_refcnt_read(m) > 1)
+ return false;
+ m = m->next;
+ }
+
+ return true;
+}
+
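+/* Return a zero-copy metadata slot to the pool for reuse. */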
+static __rte_always_inline void
+put_zmbuf(struct zcopy_mbuf *zmbuf)
+{
+ zmbuf->in_use = 0;
+}
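/*
 * Sketch (assumption, not from this patch): how the three helpers
 * above are typically combined to reclaim completed zero-copy
 * mbufs. The zmbuf_list/nr_zmbuf fields follow the vhost zero-copy
 * design; treat the names here as illustrative.
 */
static inline void
reclaim_zmbufs_sketch(struct vhost_virtqueue *vq)
{
	struct zcopy_mbuf *zmbuf, *next;

	for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
	     zmbuf != NULL; zmbuf = next) {
		next = TAILQ_NEXT(zmbuf, next);

		if (mbuf_is_consumed(zmbuf->mbuf)) {
			TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
			/* Undo the guest-buffer attachment, then free. */
			restore_mbuf(zmbuf->mbuf);
			rte_pktmbuf_free(zmbuf->mbuf);
			put_zmbuf(zmbuf);
			vq->nr_zmbuf -= 1;
		}
	}
}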
+
#endif /* _VHOST_NET_CDEV_H_ */