Add atomic swap and store macros with acquire and release ordering,
respectively. The variable in question is the interrupt_pending variable,
which is used as a guard variable by input nodes to process the device
queue.
The atomic swap uses Acquire ordering, as reads or writes following it in
program order must not be reordered before the swap.
The atomic store uses Release ordering, as the node is added to the
pending list after the store.
Change-Id: I1be49e91a15c58d0bf21ff5ba1bd37d5d7d12f7a
Original-patch-by: Damjan Marion <damarion@cisco.com>
Signed-off-by: Sirshak Das <sirshak.das@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
rt = vlib_node_get_runtime_data (vm, hw->input_node_index);
idx = hw->dq_runtime_index_by_queue[queue_id];
dq = vec_elt_at_index (rt->devices_and_queues, idx);
rt = vlib_node_get_runtime_data (vm, hw->input_node_index);
idx = hw->dq_runtime_index_by_queue[queue_id];
dq = vec_elt_at_index (rt->devices_and_queues, idx);
- dq->interrupt_pending = 1;
+
+ clib_atomic_store_rel_n (&(dq->interrupt_pending), 1);
vlib_node_set_interrupt_pending (vm, hw->input_node_index);
}
vlib_node_set_interrupt_pending (vm, hw->input_node_index);
}
+/*
+ * Acquire RMW access to interrupt_pending: atomically clears the flag,
+ * ensuring that reads/writes following the swap in program order are not
+ * reordered before it.
+ * Paired with the Release Store in vnet_device_input_set_interrupt_pending.
+ */
#define foreach_device_and_queue(var,vec) \
for (var = (vec); var < vec_end (vec); var++) \
if ((var->mode == VNET_HW_INTERFACE_RX_MODE_POLLING) \
#define foreach_device_and_queue(var,vec) \
for (var = (vec); var < vec_end (vec); var++) \
if ((var->mode == VNET_HW_INTERFACE_RX_MODE_POLLING) \
- || clib_smp_swap (&((var)->interrupt_pending), 0))
-
+ || clib_atomic_swap_acq_n (&((var)->interrupt_pending), 0))
#endif /* included_vnet_vnet_device_h */
#endif /* included_vnet_vnet_device_h */
vec_foreach (dq, rt->devices_and_queues)
{
vec_foreach (dq, rt->devices_and_queues)
{
- if (clib_smp_swap (&dq->interrupt_pending, 0) ||
- (node->state == VLIB_NODE_STATE_POLLING))
+ if ((node->state == VLIB_NODE_STATE_POLLING) ||
+ clib_atomic_swap_acq_n (&dq->interrupt_pending, 0))
{
vui =
pool_elt_at_index (vum->vhost_user_interfaces, dq->dev_instance);
{
vui =
pool_elt_at_index (vum->vhost_user_interfaces, dq->dev_instance);
#define clib_atomic_cmp_and_swap(addr,old,new) __sync_val_compare_and_swap(addr, old, new)
#define clib_atomic_bool_cmp_and_swap(addr,old,new) __sync_bool_compare_and_swap(addr, old, new)
#define clib_atomic_cmp_and_swap(addr,old,new) __sync_val_compare_and_swap(addr, old, new)
#define clib_atomic_bool_cmp_and_swap(addr,old,new) __sync_bool_compare_and_swap(addr, old, new)
#define clib_atomic_test_and_set(a) __sync_lock_test_and_set(a, 1)
#define clib_atomic_test_and_set(a) __sync_lock_test_and_set(a, 1)
#define clib_atomic_release(a) __sync_lock_release(a)
#define clib_atomic_release(a) __sync_lock_release(a)
+/* Atomic store with release ordering: writes before the store in program
+ * order are made visible to a thread that acquire-loads the same location. */
+#define clib_atomic_store_rel_n(a, b) __atomic_store_n ((a), (b), __ATOMIC_RELEASE)
+/* Atomic exchange with acquire ordering: reads/writes after the swap in
+ * program order are not reordered before it. */
+#define clib_atomic_swap_acq_n(a, b) __atomic_exchange_n ((a), (b), __ATOMIC_ACQUIRE)
+
#endif /* included_clib_atomics_h */
#endif /* included_clib_atomics_h */
#include <vppinfra/cache.h>
#include <vppinfra/os.h> /* for os_panic */
#include <vppinfra/cache.h>
#include <vppinfra/os.h> /* for os_panic */
-#define clib_smp_swap(addr,new) __sync_lock_test_and_set(addr,new)
-
#if defined (i386) || defined (__x86_64__)
#define clib_smp_pause() do { asm volatile ("pause"); } while (0)
#elif defined (__aarch64__) || defined (__arm__)
#if defined (i386) || defined (__x86_64__)
#define clib_smp_pause() do { asm volatile ("pause"); } while (0)
#elif defined (__aarch64__) || defined (__arm__)