clib_spinlock_init (clib_spinlock_t * p)
{
  /* Allocate the lock on its own cache line to avoid false sharing
     with neighboring data, then zero it so the lock starts released.  */
  *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
  clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
}
/** Acquire the spinlock, busy-waiting until it is free.
    CLIB_PAUSE lowers contention on the lock's cache line while spinning.  */
static_always_inline void
clib_spinlock_lock (clib_spinlock_t * p)
{
  /* clib_atomic_test_and_set returns non-zero while the lock is held
     by someone else; it acts as the acquire barrier.  */
  while (clib_atomic_test_and_set (&(*p)->lock))
    CLIB_PAUSE ();
  CLIB_LOCK_DBG (p);
}
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  volatile u32 n_readers;	/* number of readers currently inside */
  volatile u32 n_readers_lock;	/* spinlock protecting n_readers */
  volatile u32 writer_lock;	/* held by the writer, or by the first reader */
#if CLIB_DEBUG > 0
  pid_t pid;
clib_rwlock_init (clib_rwlock_t * p)
{
  /* Cache-line-aligned allocation plus zeroing: lock starts with no
     readers, no writer, and the reader-count spinlock released.  */
  *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
  clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
}
/** Take the rwlock for reading.  Multiple readers may hold the lock
    concurrently; the first reader in also acquires writer_lock so that
    writers are excluded while any reader is inside.  */
always_inline void
clib_rwlock_reader_lock (clib_rwlock_t * p)
{
  /* Serialize updates to n_readers with a small spinlock.  */
  while (clib_atomic_test_and_set (&(*p)->n_readers_lock))
    CLIB_PAUSE ();

  (*p)->n_readers += 1;
  if ((*p)->n_readers == 1)
    {
      /* First reader: block out writers.  */
      while (clib_atomic_test_and_set (&(*p)->writer_lock))
	CLIB_PAUSE ();
    }
  /* Release the reader-count spinlock; the barrier keeps the store
     from being reordered ahead of the updates above.  */
  CLIB_MEMORY_BARRIER ();
  (*p)->n_readers_lock = 0;

  CLIB_LOCK_DBG (p);
}
/** Release a reader's hold on the rwlock.  The last reader out also
    releases writer_lock, allowing writers to proceed.  */
always_inline void
clib_rwlock_reader_unlock (clib_rwlock_t * p)
{
  ASSERT ((*p)->n_readers > 0);
  CLIB_LOCK_DBG_CLEAR (p);

  /* Serialize updates to n_readers with the reader-count spinlock.  */
  while (clib_atomic_test_and_set (&(*p)->n_readers_lock))
    CLIB_PAUSE ();

  (*p)->n_readers -= 1;
  if ((*p)->n_readers == 0)
    {
      /* Last reader: let writers in.  Barrier orders the critical
         section before the release store.  */
      CLIB_MEMORY_BARRIER ();
      (*p)->writer_lock = 0;
    }

  CLIB_MEMORY_BARRIER ();
  (*p)->n_readers_lock = 0;
}
/** Take the rwlock for writing.  Spins until writer_lock is free,
    i.e. no other writer and no readers (the first reader holds
    writer_lock on behalf of all readers).  */
always_inline void
clib_rwlock_writer_lock (clib_rwlock_t * p)
{
  while (clib_atomic_test_and_set (&(*p)->writer_lock))
    CLIB_PAUSE ();
  CLIB_LOCK_DBG (p);
}