Use atomic swap and store macros with acquire and release ordering
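
The interrupt_pending flag is set for a queue by whichever thread
raises the interrupt and is consumed by the worker thread that owns
the queue, so plain loads and stores are not enough: the consumer
must observe the producer's prior writes once it sees the flag. The
release store in vnet_device_input_set_interrupt_pending publishes
the flag, and the acquire swap in foreach_device_and_queue both
consumes and clears it in a single read-modify-write, so a concurrent
set cannot be lost between a separate read and clear.

A minimal sketch of the same pairing, written against the GCC
__atomic builtins that the clib_atomic_* macros are assumed to wrap
(the producer/consumer functions are illustrative only, not part of
this patch):

    static u32 pending;          /* stand-in for dq->interrupt_pending */

    /* producer: make the queue's work visible, then raise the flag */
    static void
    raise_interrupt (void)
    {
      __atomic_store_n (&pending, 1, __ATOMIC_RELEASE);
    }

    /* consumer: test-and-clear in one atomic RMW; returns old value */
    static u32
    take_interrupt (void)
    {
      return __atomic_exchange_n (&pending, 0, __ATOMIC_ACQUIRE);
    }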
diff --git a/src/vnet/devices/devices.h b/src/vnet/devices/devices.h
index a5cbc35..cc960c9 100644
--- a/src/vnet/devices/devices.h
+++ b/src/vnet/devices/devices.h
@@ -50,11 +50,49 @@ typedef struct
 typedef struct
 {
   vnet_device_per_worker_data_t *workers;
+  uword first_worker_thread_index;
+  uword last_worker_thread_index;
+  uword next_worker_thread_index;
 } vnet_device_main_t;
 
+typedef struct
+{
+  u32 hw_if_index;
+  u32 dev_instance;
+  u16 queue_id;
+  vnet_hw_interface_rx_mode mode;
+  u32 interrupt_pending;	/* release store by producer, acquire swap by consumer */
+} vnet_device_and_queue_t;
+
+typedef struct
+{
+  vnet_device_and_queue_t *devices_and_queues;
+  vlib_node_state_t enabled_node_state;
+} vnet_device_input_runtime_t;
+
 extern vnet_device_main_t vnet_device_main;
 extern vlib_node_registration_t device_input_node;
 extern const u32 device_input_next_node_advance[];
+extern const u32 device_input_next_node_flags[];
+
+static inline void
+vnet_hw_interface_set_input_node (vnet_main_t * vnm, u32 hw_if_index,
+                                 u32 node_index)
+{
+  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+  hw->input_node_index = node_index;
+}
+
+void vnet_hw_interface_assign_rx_thread (vnet_main_t * vnm, u32 hw_if_index,
+                                        u16 queue_id, uword thread_index);
+int vnet_hw_interface_unassign_rx_thread (vnet_main_t * vnm, u32 hw_if_index,
+                                         u16 queue_id);
+int vnet_hw_interface_set_rx_mode (vnet_main_t * vnm, u32 hw_if_index,
+                                  u16 queue_id,
+                                  vnet_hw_interface_rx_mode mode);
+int vnet_hw_interface_get_rx_mode (vnet_main_t * vnm, u32 hw_if_index,
+                                  u16 queue_id,
+                                  vnet_hw_interface_rx_mode * mode);
 
 static inline u64
 vnet_get_aggregate_rx_packets (void)
@@ -69,15 +107,67 @@ vnet_get_aggregate_rx_packets (void)
 }
 
 static inline void
-vnet_device_increment_rx_packets (u32 cpu_index, u64 count)
+vnet_device_increment_rx_packets (u32 thread_index, u64 count)
 {
   vnet_device_main_t *vdm = &vnet_device_main;
   vnet_device_per_worker_data_t *pwd;
 
-  pwd = vec_elt_at_index (vdm->workers, cpu_index);
+  pwd = vec_elt_at_index (vdm->workers, thread_index);
   pwd->aggregate_rx_packets += count;
 }
 
+static_always_inline vnet_device_and_queue_t *
+vnet_get_device_and_queue (vlib_main_t * vm, vlib_node_runtime_t * node)
+{
+  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
+  return rt->devices_and_queues;
+}
+
+static_always_inline uword
+vnet_get_device_input_thread_index (vnet_main_t * vnm, u32 hw_if_index,
+                                   u16 queue_id)
+{
+  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+  ASSERT (queue_id < vec_len (hw->input_node_thread_index_by_queue));
+  return hw->input_node_thread_index_by_queue[queue_id];
+}
+
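+/*
+ * Mark a device queue's interrupt as pending and schedule its device
+ * input node on the worker thread that owns the queue. The release
+ * store below pairs with the acquire swap in foreach_device_and_queue.
+ */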
+static_always_inline void
+vnet_device_input_set_interrupt_pending (vnet_main_t * vnm, u32 hw_if_index,
+                                        u16 queue_id)
+{
+  vlib_main_t *vm;
+  vnet_hw_interface_t *hw;
+  vnet_device_input_runtime_t *rt;
+  vnet_device_and_queue_t *dq;
+  uword idx;
+
+  hw = vnet_get_hw_interface (vnm, hw_if_index);
+  idx = vnet_get_device_input_thread_index (vnm, hw_if_index, queue_id);
+  vm = vlib_mains[idx];
+  rt = vlib_node_get_runtime_data (vm, hw->input_node_index);
+  idx = hw->dq_runtime_index_by_queue[queue_id];
+  dq = vec_elt_at_index (rt->devices_and_queues, idx);
+
+  clib_atomic_store_rel_n (&(dq->interrupt_pending), 1);
+
+  vlib_node_set_interrupt_pending (vm, hw->input_node_index);
+}
+
+/*
+ * Acquire read-modify-write: atomically test and clear interrupt_pending.
+ * Pairs with the release store in vnet_device_input_set_interrupt_pending.
+ */
+#define foreach_device_and_queue(var,vec)                       \
+  for (var = (vec); var < vec_end (vec); var++)                 \
+    if ((var->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)        \
+        || clib_atomic_swap_acq_n (&((var)->interrupt_pending), 0))
+
 #endif /* included_vnet_vnet_device_h */
 
 /*