_ (VIRTIO_NET_F_CTRL_MAC_ADDR, 23) /* Set MAC address */ \
_ (VIRTIO_F_NOTIFY_ON_EMPTY, 24) \
_ (VHOST_F_LOG_ALL, 26) /* Log all write descriptors */ \
- _ (VIRTIO_F_ANY_LAYOUT, 27) /* Can the device handle any descripor layout */ \
+ _ (VIRTIO_F_ANY_LAYOUT, 27) /* Can the device handle any descriptor layout */ \
_ (VIRTIO_RING_F_INDIRECT_DESC, 28) /* Support indirect buffer descriptors */ \
_ (VIRTIO_RING_F_EVENT_IDX, 29) /* The Guest publishes the used index for which it expects an interrupt \
* at the end of the avail ring. Host should ignore the avail->flags field. */ \
/* Feature-bit helper: argument is fully parenthesized so expressions
 * such as VIRTIO_FEATURE(a ? x : y) expand with the intended precedence. */
#define VIRTIO_FEATURE(X) (1ULL << (X))

/* Queue pair i maps to ring indices: RX ring = 2*i, TX ring = 2*i + 1.
 * The *_ACCESS macros invert that mapping back to the pair index.
 * All arguments are parenthesized to avoid operator-precedence bugs. */
#define TX_QUEUE(X)        (((X) * 2) + 1)
#define RX_QUEUE(X)        ((X) * 2)
#define TX_QUEUE_ACCESS(X) ((X) / 2)
#define RX_QUEUE_ACCESS(X) ((X) / 2)

/* Backend flavor of a virtio interface.
 * Values start at 1 so that 0 can serve as an "unset/invalid" sentinel. */
typedef enum
{
  VIRTIO_IF_TYPE_TAP = 1,	/* kernel tap backend */
  VIRTIO_IF_TYPE_PCI = 2,	/* virtio PCI device backend */
  VIRTIO_IF_N_TYPES = 3,	/* number of valid types + 1 (sentinel) */
} virtio_if_type_t;
struct vring_desc *desc;
struct vring_used *used;
struct vring_avail *avail;
+ clib_spinlock_t lockp;
u16 desc_in_use;
u16 desc_next;
int kick_fd;
int call_fd;
+ u8 buffer_pool_index;
u16 size;
u16 queue_id;
+#define VRING_TX_OUT_OF_ORDER 1
u16 flags;
u32 call_file_index;
u32 *buffers;
- u32 *indirect_buffers;
u16 last_used_idx;
u16 last_kick_avail_idx;
} virtio_vring_t;
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u32 flags;
- clib_spinlock_t lockp;
u32 dev_instance;
u32 hw_if_index;
u32 sw_if_index;
+ u32 numa_node;
u16 virtio_net_hdr_sz;
virtio_if_type_t type;
union
pci_addr_t pci_addr;
};
u32 per_interface_next_index;
- int fd;
- union
- {
- int tap_fd;
- u32 pci_dev_handle;
- };
- virtio_vring_t *vrings;
-
+ int *vhost_fds;
+ int tap_fd;
+ u32 msix_enabled;
+ u32 pci_dev_handle;
+ virtio_vring_t *rxq_vrings;
+ virtio_vring_t *txq_vrings;
u64 features, remote_features;
/* error */
clib_error_t *error;
+ u8 support_int_mode; /* support interrupt mode */
u16 max_queue_pairs;
- u16 tx_ring_sz;
- u16 rx_ring_sz;
+ u16 num_rxqs;
+ u16 num_txqs;
u8 status;
u8 mac_addr[6];
- u64 bar[2];
u8 *host_if_name;
u8 *net_ns;
u8 *host_bridge;
u8 host_ip4_prefix_len;
ip6_address_t host_ip6_addr;
u8 host_ip6_prefix_len;
-
+ u32 host_mtu_size;
+ int gso_enabled;
+ int csum_offload_enabled;
int ifindex;
+ virtio_vring_t *cxq_vring;
} virtio_if_t;
typedef struct
{
+ /* logging */
+ vlib_log_class_t log_default;
+
virtio_if_t *interfaces;
} virtio_main_t;
clib_error_t *virtio_vring_init (vlib_main_t * vm, virtio_if_t * vif, u16 idx,
u16 sz);
-clib_error_t *virtio_vring_free (vlib_main_t * vm, virtio_if_t * vif,
+clib_error_t *virtio_vring_free_rx (vlib_main_t * vm, virtio_if_t * vif,
+ u32 idx);
+clib_error_t *virtio_vring_free_tx (vlib_main_t * vm, virtio_if_t * vif,
+ u32 idx);
+void virtio_vring_set_numa_node (vlib_main_t * vm, virtio_if_t * vif,
u32 idx);
extern void virtio_free_used_desc (vlib_main_t * vm, virtio_vring_t * vring);
extern void virtio_free_rx_buffers (vlib_main_t * vm, virtio_vring_t * vring);
extern void virtio_pci_legacy_notify_queue (vlib_main_t * vm,
virtio_if_t * vif, u16 queue_id);
format_function_t format_virtio_device_name;
+format_function_t format_virtio_log_name;
static_always_inline void
virtio_kick (vlib_main_t * vm, virtio_vring_t * vring, virtio_if_t * vif)
}
}

/* Logging helpers: emit a message prefixed with the interface name.
 * Wrapped in do { ... } while (0) so each macro behaves as a single
 * statement and is safe inside unbraced if/else bodies (a bare { ... };
 * form would break the else branch). */
#define virtio_log_debug(vif, f, ...)                          \
  do                                                           \
    {                                                          \
      vlib_log (VLIB_LOG_LEVEL_DEBUG, virtio_main.log_default, \
		"%U: " f, format_virtio_log_name, vif,         \
		##__VA_ARGS__);                                \
    }                                                          \
  while (0)

#define virtio_log_warning(vif, f, ...)                          \
  do                                                             \
    {                                                            \
      vlib_log (VLIB_LOG_LEVEL_WARNING, virtio_main.log_default, \
		"%U: " f, format_virtio_log_name, vif,           \
		##__VA_ARGS__);                                  \
    }                                                            \
  while (0)

#define virtio_log_error(vif, f, ...)                        \
  do                                                         \
    {                                                        \
      vlib_log (VLIB_LOG_LEVEL_ERR, virtio_main.log_default, \
		"%U: " f, format_virtio_log_name, vif,       \
		##__VA_ARGS__);                              \
    }                                                        \
  while (0)

#endif /* _VNET_DEVICES_VIRTIO_VIRTIO_H_ */
/*