+static_always_inline vnet_device_and_queue_t *
+vnet_get_device_and_queue (vlib_main_t * vm, vlib_node_runtime_t * node)
+{
+ vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
+ return rt->devices_and_queues;
+}
+
+static_always_inline uword
+vnet_get_device_input_thread_index (vnet_main_t * vnm, u32 hw_if_index,
+ u16 queue_id)
+{
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ ASSERT (queue_id < vec_len (hw->input_node_thread_index_by_queue));
+ return hw->input_node_thread_index_by_queue[queue_id];
+}
+
+static_always_inline void
+vnet_device_input_set_interrupt_pending (vnet_main_t * vnm, u32 hw_if_index,
+ u16 queue_id)
+{
+ vlib_main_t *vm;
+ vnet_hw_interface_t *hw;
+ vnet_device_input_runtime_t *rt;
+ vnet_device_and_queue_t *dq;
+ uword idx;
+
+ hw = vnet_get_hw_interface (vnm, hw_if_index);
+ idx = vnet_get_device_input_thread_index (vnm, hw_if_index, queue_id);
+ vm = vlib_mains[idx];
+ rt = vlib_node_get_runtime_data (vm, hw->input_node_index);
+ idx = hw->dq_runtime_index_by_queue[queue_id];
+ dq = vec_elt_at_index (rt->devices_and_queues, idx);
+
+ clib_atomic_store_rel_n (&(dq->interrupt_pending), 1);
+
+ vlib_node_set_interrupt_pending (vm, hw->input_node_index);
+}
+
/*
 * Iterate over a device-and-queue vector, visiting polling-mode queues
 * unconditionally and interrupt-mode queues only when their pending flag
 * is set (the flag is atomically cleared as it is read).
 *
 * Acquire RMW access: the acquire swap pairs with the release store in
 * vnet_device_input_set_interrupt_pending.
 *
 * Fix: every use of the 'var' argument is now parenthesized (previously
 * only the interrupt_pending access was), so an expression argument such
 * as '*dqp' expands correctly.
 */
#define foreach_device_and_queue(var,vec)                               \
  for ((var) = (vec); (var) < vec_end (vec); (var)++)                   \
    if (((var)->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)              \
	|| clib_atomic_swap_acq_n (&((var)->interrupt_pending), 0))
+