#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>
#include <vnet/udp/udp_packet.h>
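+/* for vnet_hw_if_get_rxq_poll_vector(), part of the new rx queue infra */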
+#include <vnet/interface/rx_queue_funcs.h>
#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>
ethernet_header_t *eh = (ethernet_header_t *) b0_data;
u16 ethertype = clib_net_to_host_u16 (eh->type);
u16 l2hdr_sz = sizeof (ethernet_header_t);
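+ /* offload requests are now accumulated in 'oflags' and applied once at
+    the end via vnet_buffer_offload_flags_set(), replacing the old
+    per-flag VNET_BUFFER_F_OFFLOAD_* buffer bits */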
+ u32 oflags = 0;
if (ethernet_frame_is_tagged (ethertype))
{
/* ... VLAN tag handling elided; adjusts ethertype and l2hdr_sz ... */
}
if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
{
ip4_header_t *ip4 = (ip4_header_t *) (b0_data + l2hdr_sz);
l4_proto = ip4->protocol;
- b0->flags |= VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
+ b0->flags |= VNET_BUFFER_F_IS_IP4;
+ oflags |= VNET_BUFFER_OFFLOAD_F_IP_CKSUM;
}
else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
{
/* ... IPv6 header parsing elided; sets l4_proto ... */
}
if (l4_proto == IP_PROTOCOL_TCP)
{
tcp_header_t *tcp = (tcp_header_t *)
(b0_data + vnet_buffer (b0)->l4_hdr_offset);
l4_hdr_sz = tcp_header_bytes (tcp);
- b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
+ oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
}
else if (l4_proto == IP_PROTOCOL_UDP)
{
l4_hdr_sz = sizeof (udp_header_t);
- b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
+ oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
}
if (hdr->gso_type == VIRTIO_NET_HDR_GSO_UDP)
vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
/* ... TCPv4/TCPv6 GSO branches elided ... */
b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6);
}
+
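+ /* hand all accumulated checksum offload requests to the buffer in one
+    call */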
+ if (oflags)
+ vnet_buffer_offload_flags_set (b0, oflags);
}
/* ... intervening helper functions elided ... */
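/* rx mode is no longer passed in as a parameter; it is tracked on the
vring and read as txvq->mode where needed */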
static_always_inline u32
-vhost_user_if_input (vlib_main_t * vm,
- vhost_user_main_t * vum,
- vhost_user_intf_t * vui,
- u16 qid, vlib_node_runtime_t * node,
- vnet_hw_if_rx_mode mode, u8 enable_csum)
+vhost_user_if_input (vlib_main_t *vm, vhost_user_main_t *vum,
+ vhost_user_intf_t *vui, u16 qid,
+ vlib_node_runtime_t *node, u8 enable_csum)
{
vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
vnet_feature_main_t *fm = &feature_main;
* When the traffic subsides, the scheduler switches the node back to
* interrupt mode. We must tell the driver we want interrupts.
*/
- if (PREDICT_FALSE (mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
+ if (PREDICT_FALSE (txvq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
{
if ((node->flags &
VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
last_avail_idx++;
last_used_idx++;
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b_head);
-
vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
vnet_buffer (b_head)->sw_if_index[VLIB_TX] = (u32) ~ 0;
b_head->error = 0;
}
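/* packed-ring variant below: same signature change, mode comes from
txvq->mode */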
static_always_inline u32
-vhost_user_if_input_packed (vlib_main_t * vm, vhost_user_main_t * vum,
- vhost_user_intf_t * vui, u16 qid,
- vlib_node_runtime_t * node,
- vnet_hw_if_rx_mode mode, u8 enable_csum)
+vhost_user_if_input_packed (vlib_main_t *vm, vhost_user_main_t *vum,
+ vhost_user_intf_t *vui, u16 qid,
+ vlib_node_runtime_t *node, u8 enable_csum)
{
vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
vnet_feature_main_t *fm = &feature_main;
* When the traffic subsides, the scheduler switches the node back to
* interrupt mode. We must tell the driver we want interrupts.
*/
- if (PREDICT_FALSE (mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
+ if (PREDICT_FALSE (txvq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
{
if ((node->flags &
VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
b_head->total_length_not_including_first_buffer -=
b_head->current_length;
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b_head);
-
vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
vnet_buffer (b_head)->sw_if_index[VLIB_TX] = ~0;
b_head->error = 0;
vhost_user_main_t *vum = &vhost_user_main;
uword n_rx_packets = 0;
vhost_user_intf_t *vui;
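+ /* the per-node devices_and_queues runtime data is replaced by a poll
+    vector supplied by the rx queue infra: the (dev_instance, queue_id)
+    pairs this node should service on this dispatch, making the old
+    per-queue interrupt_pending bookkeeping unnecessary */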
- vnet_device_input_runtime_t *rt =
- (vnet_device_input_runtime_t *) node->runtime_data;
- vnet_device_and_queue_t *dq;
+ vnet_hw_if_rxq_poll_vector_t *pv = vnet_hw_if_get_rxq_poll_vector (vm, node);
+ vnet_hw_if_rxq_poll_vector_t *pve;
- vec_foreach (dq, rt->devices_and_queues)
- {
- if ((node->state == VLIB_NODE_STATE_POLLING) ||
- clib_atomic_swap_acq_n (&dq->interrupt_pending, 0))
- {
- vui =
- pool_elt_at_index (vum->vhost_user_interfaces, dq->dev_instance);
- if (vhost_user_is_packed_ring_supported (vui))
- {
- if (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM))
- n_rx_packets += vhost_user_if_input_packed (vm, vum, vui,
- dq->queue_id, node,
- dq->mode, 1);
- else
- n_rx_packets += vhost_user_if_input_packed (vm, vum, vui,
- dq->queue_id, node,
- dq->mode, 0);
- }
- else
- {
- if (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM))
- n_rx_packets += vhost_user_if_input (vm, vum, vui, dq->queue_id,
- node, dq->mode, 1);
- else
- n_rx_packets += vhost_user_if_input (vm, vum, vui, dq->queue_id,
- node, dq->mode, 0);
- }
- }
- }
+ vec_foreach (pve, pv)
+ {
+ vui = pool_elt_at_index (vum->vhost_user_interfaces, pve->dev_instance);
+ if (vhost_user_is_packed_ring_supported (vui))
+ {
+ if (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM))
+ n_rx_packets += vhost_user_if_input_packed (
+ vm, vum, vui, pve->queue_id, node, 1);
+ else
+ n_rx_packets += vhost_user_if_input_packed (
+ vm, vum, vui, pve->queue_id, node, 0);
+ }
+ else
+ {
+ if (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM))
+ n_rx_packets +=
+ vhost_user_if_input (vm, vum, vui, pve->queue_id, node, 1);
+ else
+ n_rx_packets +=
+ vhost_user_if_input (vm, vum, vui, pve->queue_id, node, 0);
+ }
+ }
return n_rx_packets;
}
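
/*
 * Context, not part of this patch: the poll vector consumed above is
 * populated when a driver registers its rx queues with the new infra.
 * A minimal sketch (names from vnet/interface/rx_queue_funcs.h; exact
 * usage may differ per driver):
 *
 *   u32 qi = vnet_hw_if_register_rx_queue (vnm, hw_if_index, queue_id,
 *                                          VNET_HW_IF_RXQ_THREAD_ANY);
 *   vnet_hw_if_set_rx_queue_mode (vnm, qi, VNET_HW_IF_RX_MODE_POLLING);
 *   vnet_hw_if_update_runtime_data (vnm, hw_if_index);
 */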