+/*
+ * Poll one virtio RX queue (qid) of interface vif and hand received
+ * packets to the graph, then refill the ring with fresh buffers.
+ * Returns the number of packets processed (rv from the dispatch helper).
+ *
+ * Before polling, this opportunistically services the TX queue that this
+ * thread maps to: flushing the GRO flow table (packet coalescing) or the
+ * packet-buffering queue, whichever feature is enabled.
+ */
+static_always_inline uword
+virtio_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame, virtio_if_t * vif, u16 qid,
+ virtio_if_type_t type)
+{
+ virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, qid);
+ const int hdr_sz = vif->virtio_net_hdr_sz;
+ /* Map this worker thread onto a TX queue; NOTE(review): assumes the
+  * same thread_index % num_txqs mapping is used on the TX path — confirm. */
+ u16 txq_id = vm->thread_index % vif->num_txqs;
+ virtio_vring_t *txq_vring = vec_elt_at_index (vif->txq_vrings, txq_id);
+ uword rv;
+
+ /* Best-effort: if another thread holds the TX queue lock, skip the
+  * coalesce/buffering flush this round rather than blocking the RX path. */
+ if (clib_spinlock_trylock_if_init (&txq_vring->lockp))
+ {
+ if (vif->packet_coalesce)
+ vnet_gro_flow_table_schedule_node_on_dispatcher
+ (vm, txq_vring->flow_table);
+ else if (vif->packet_buffering)
+ virtio_vring_buffering_schedule_node_on_dispatcher
+ (vm, txq_vring->buffering);
+ clib_spinlock_unlock_if_init (&txq_vring->lockp);
+ }
+
+ /* Dispatch to a compile-time specialized variant of the input routine.
+  * The three trailing constants select (gso, checksum-offload, packed-ring)
+  * so the compiler can dead-code-eliminate the unused paths.  Exactly one
+  * of the six branches runs, so rv is always assigned before use. */
+ if (vif->is_packed)
+ {
+ if (vif->gso_enabled)
+ rv =
+ virtio_device_input_gso_inline (vm, node, frame, vif, vring, type,
+ 1, 1, 1);
+ else if (vif->csum_offload_enabled)
+ rv =
+ virtio_device_input_gso_inline (vm, node, frame, vif, vring, type,
+ 0, 1, 1);
+ else
+ rv =
+ virtio_device_input_gso_inline (vm, node, frame, vif, vring, type,
+ 0, 0, 1);
+
+ /* Replenish descriptors consumed above (packed-ring layout). */
+ virtio_refill_vring_packed (vm, vif, type, vring, hdr_sz,
+ node->node_index);
+ }
+ else
+ {
+ if (vif->gso_enabled)
+ rv =
+ virtio_device_input_gso_inline (vm, node, frame, vif, vring, type,
+ 1, 1, 0);
+ else if (vif->csum_offload_enabled)
+ rv =
+ virtio_device_input_gso_inline (vm, node, frame, vif, vring, type,
+ 0, 1, 0);
+ else
+ rv =
+ virtio_device_input_gso_inline (vm, node, frame, vif, vring, type,
+ 0, 0, 0);
+
+ /* Replenish descriptors consumed above (split-ring layout). */
+ virtio_refill_vring_split (vm, vif, type, vring, hdr_sz,
+ node->node_index);
+ }
+ return rv;
+}
+
+VLIB_NODE_FN (virtio_input_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)