* Guarantees that the LOAD and STORE operations generated before the
* barrier occur before the LOAD and STORE operations generated after.
*/
-#define rte_mb() {asm volatile("sync" : : : "memory"); }
+#define rte_mb() asm volatile("sync" : : : "memory")
/**
* Write memory barrier.
return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}
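+
+/**
+ * Atomically write the 16-bit value val into *dst and return the
+ * value previously stored there (sequentially consistent ordering).
+ */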
+static inline uint16_t
+rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
+{
+ return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
+}
+
/*------------------------- 32 bit atomic operations -------------------------*/
static inline int
return ret == 0;
}
+
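+/**
+ * Atomically write the 32-bit value val into *dst and return the
+ * value previously stored there (sequentially consistent ordering).
+ */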
+static inline uint32_t
+rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
+{
+ return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
+}
+
/*------------------------- 64 bit atomic operations -------------------------*/
static inline int
{
return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
}
-
/**
* Atomically set a 64-bit counter to 0.
*
{
v->cnt = 0;
}
+
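+/**
+ * Atomically write the 64-bit value val into *dst and return the
+ * value previously stored there (sequentially consistent ordering).
+ */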
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
+{
+	return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
+}
+
#endif
#ifdef __cplusplus