interface: rx queue infra rework, part one
diff --git a/src/vnet/devices/virtio/vhost_user_input.c b/src/vnet/devices/virtio/vhost_user_input.c
index 7ea70c6..62b59f6 100644
--- a/src/vnet/devices/virtio/vhost_user_input.c
+++ b/src/vnet/devices/virtio/vhost_user_input.c
@@ -37,6 +37,7 @@
 #include <vnet/devices/devices.h>
 #include <vnet/feature/feature.h>
 #include <vnet/udp/udp_packet.h>
+#include <vnet/interface/rx_queue_funcs.h>
 
 #include <vnet/devices/virtio/vhost_user.h>
 #include <vnet/devices/virtio/vhost_user_inline.h>
@@ -372,11 +373,9 @@ vhost_user_input_setup_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
 }
 
 static_always_inline u32
-vhost_user_if_input (vlib_main_t * vm,
-                    vhost_user_main_t * vum,
-                    vhost_user_intf_t * vui,
-                    u16 qid, vlib_node_runtime_t * node,
-                    vnet_hw_if_rx_mode mode, u8 enable_csum)
+vhost_user_if_input (vlib_main_t *vm, vhost_user_main_t *vum,
+                    vhost_user_intf_t *vui, u16 qid,
+                    vlib_node_runtime_t *node, u8 enable_csum)
 {
   vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
   vnet_feature_main_t *fm = &feature_main;
@@ -411,7 +410,7 @@ vhost_user_if_input (vlib_main_t * vm,
    * When the traffic subsides, the scheduler switches the node back to
    * interrupt mode. We must tell the driver we want interrupt.
    */
-  if (PREDICT_FALSE (mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
+  if (PREDICT_FALSE (txvq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
     {
       if ((node->flags &
           VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
@@ -1081,10 +1080,9 @@ vhost_user_assemble_packet (vring_packed_desc_t * desc_table,
 }
 
 static_always_inline u32
-vhost_user_if_input_packed (vlib_main_t * vm, vhost_user_main_t * vum,
-                           vhost_user_intf_t * vui, u16 qid,
-                           vlib_node_runtime_t * node,
-                           vnet_hw_if_rx_mode mode, u8 enable_csum)
+vhost_user_if_input_packed (vlib_main_t *vm, vhost_user_main_t *vum,
+                           vhost_user_intf_t *vui, u16 qid,
+                           vlib_node_runtime_t *node, u8 enable_csum)
 {
   vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
   vnet_feature_main_t *fm = &feature_main;
@@ -1126,7 +1124,7 @@ vhost_user_if_input_packed (vlib_main_t * vm, vhost_user_main_t * vum,
    * When the traffic subsides, the scheduler switches the node back to
    * interrupt mode. We must tell the driver we want interrupt.
    */
-  if (PREDICT_FALSE (mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
+  if (PREDICT_FALSE (txvq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
     {
       if ((node->flags &
           VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
@@ -1415,39 +1413,31 @@ VLIB_NODE_FN (vhost_user_input_node) (vlib_main_t * vm,
   vhost_user_main_t *vum = &vhost_user_main;
   uword n_rx_packets = 0;
   vhost_user_intf_t *vui;
-  vnet_device_input_runtime_t *rt =
-    (vnet_device_input_runtime_t *) node->runtime_data;
-  vnet_device_and_queue_t *dq;
+  vnet_hw_if_rxq_poll_vector_t *pv = vnet_hw_if_get_rxq_poll_vector (vm, node);
+  vnet_hw_if_rxq_poll_vector_t *pve;
 
-  vec_foreach (dq, rt->devices_and_queues)
-  {
-    if ((node->state == VLIB_NODE_STATE_POLLING) ||
-       clib_atomic_swap_acq_n (&dq->interrupt_pending, 0))
-      {
-       vui =
-         pool_elt_at_index (vum->vhost_user_interfaces, dq->dev_instance);
-       if (vhost_user_is_packed_ring_supported (vui))
-         {
-           if (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM))
-             n_rx_packets += vhost_user_if_input_packed (vm, vum, vui,
-                                                         dq->queue_id, node,
-                                                         dq->mode, 1);
-           else
-             n_rx_packets += vhost_user_if_input_packed (vm, vum, vui,
-                                                         dq->queue_id, node,
-                                                         dq->mode, 0);
-         }
-       else
-         {
-           if (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM))
-             n_rx_packets += vhost_user_if_input (vm, vum, vui, dq->queue_id,
-                                                  node, dq->mode, 1);
-           else
-             n_rx_packets += vhost_user_if_input (vm, vum, vui, dq->queue_id,
-                                                  node, dq->mode, 0);
-         }
-      }
-  }
+  vec_foreach (pve, pv)
+    {
+      vui = pool_elt_at_index (vum->vhost_user_interfaces, pve->dev_instance);
+      if (vhost_user_is_packed_ring_supported (vui))
+       {
+         if (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM))
+           n_rx_packets += vhost_user_if_input_packed (
+             vm, vum, vui, pve->queue_id, node, 1);
+         else
+           n_rx_packets += vhost_user_if_input_packed (
+             vm, vum, vui, pve->queue_id, node, 0);
+       }
+      else
+       {
+         if (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM))
+           n_rx_packets +=
+             vhost_user_if_input (vm, vum, vui, pve->queue_id, node, 1);
+         else
+           n_rx_packets +=
+             vhost_user_if_input (vm, vum, vui, pve->queue_id, node, 0);
+       }
+    }
 
   return n_rx_packets;
 }
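
For readers new to the rx queue infra, below is a minimal, hypothetical sketch (not part of this patch) of the per-driver input node pattern the rework moves to. `my_driver_input_node` and `my_driver_rx_one_queue` are made-up names; only `vnet_hw_if_get_rxq_poll_vector()` and the `dev_instance`/`queue_id` fields are taken from the code above. Note that the per-queue rx mode is no longer passed as an argument: vhost-user now reads it from the vring (`txvq->mode`), and the poll vector presumably already contains only the queues this thread should service right now, which is why the old `interrupt_pending` swap disappears from the node function.

/* Hypothetical driver, shown only to illustrate the pattern above. */
#include <vlib/vlib.h>
#include <vnet/interface/rx_queue_funcs.h>

static uword
my_driver_rx_one_queue (vlib_main_t *vm, vlib_node_runtime_t *node,
			u32 dev_instance, u32 queue_id)
{
  /* device-specific receive path would go here */
  return 0;
}

VLIB_NODE_FN (my_driver_input_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  uword n_rx_packets = 0;
  /* one entry per rx queue assigned to this thread's instance of the node */
  vnet_hw_if_rxq_poll_vector_t *pv = vnet_hw_if_get_rxq_poll_vector (vm, node);
  vnet_hw_if_rxq_poll_vector_t *pve;

  vec_foreach (pve, pv)
    n_rx_packets +=
      my_driver_rx_one_queue (vm, node, pve->dev_instance, pve->queue_id);

  return n_rx_packets;
}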