X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvppinfra%2Flock.h;h=3cfe11cba17684c194a9bcd1d29b646ba573f111;hb=d14fccd6d51569b0f025af57c830b873afcc18e3;hp=c60ff4146122d7bfa868e0ef8897967d7a6bdb30;hpb=1927da29ccbe1d4cc8e59ccfa197eb41c257814f;p=vpp.git

diff --git a/src/vppinfra/lock.h b/src/vppinfra/lock.h
index c60ff414612..3cfe11cba17 100644
--- a/src/vppinfra/lock.h
+++ b/src/vppinfra/lock.h
@@ -17,6 +17,36 @@ #define included_clib_lock_h
 
 #include <vppinfra/clib.h>
+#include <vppinfra/atomics.h>
+
+#if __x86_64__
+#define CLIB_PAUSE() __builtin_ia32_pause ()
+#elif defined (__aarch64__) || defined (__arm__)
+#define CLIB_PAUSE() __asm__ ("yield")
+#else
+#define CLIB_PAUSE()
+#endif
+
+#if CLIB_DEBUG > 1
+#define CLIB_LOCK_DBG(_p)					\
+do {								\
+    (*_p)->frame_address = __builtin_frame_address (0);	\
+    (*_p)->pid = getpid ();					\
+    (*_p)->thread_index = os_get_thread_index ();		\
+} while (0)
+#define CLIB_LOCK_DBG_CLEAR(_p)					\
+do {								\
+    (*_p)->frame_address = 0;					\
+    (*_p)->pid = 0;						\
+    (*_p)->thread_index = 0;					\
+} while (0)
+#else
+#define CLIB_LOCK_DBG(_p)
+#define CLIB_LOCK_DBG_CLEAR(_p)
+#endif
+
+#define CLIB_SPINLOCK_IS_LOCKED(_p) (*(_p))->lock
+#define CLIB_SPINLOCK_ASSERT_LOCKED(_p) ASSERT(CLIB_SPINLOCK_IS_LOCKED((_p)))
 
 typedef struct
 {
@@ -24,7 +54,7 @@ typedef struct
   u32 lock;
 #if CLIB_DEBUG > 0
   pid_t pid;
-  uword cpu_index;
+  uword thread_index;
   void *frame_address;
 #endif
 } *clib_spinlock_t;
@@ -33,7 +63,7 @@ static inline void
 clib_spinlock_init (clib_spinlock_t * p)
 {
   *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
-  memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
+  clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
 }
 
 static inline void
@@ -49,16 +79,16 @@ clib_spinlock_free (clib_spinlock_t * p)
 static_always_inline void
 clib_spinlock_lock (clib_spinlock_t * p)
 {
-  while (__sync_lock_test_and_set (&(*p)->lock, 1))
-#if __x86_64__
-    __builtin_ia32_pause ()
-#endif
-    ;
-#if CLIB_DEBUG > 0
-  (*p)->frame_address = __builtin_frame_address (0);
-  (*p)->pid = getpid ();
-  (*p)->cpu_index = os_get_cpu_number ();
-#endif
+  u32 free = 0;
+  while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->lock, &free, 1, 0))
+    {
+      /* atomic load limits number of compare_exchange executions */
+      while (clib_atomic_load_relax_n (&(*p)->lock))
+	CLIB_PAUSE ();
+      /* on failure, compare_exchange writes (*p)->lock into free */
+      free = 0;
+    }
+  CLIB_LOCK_DBG (p);
 }
 
 static_always_inline void
@@ -71,12 +101,9 @@ clib_spinlock_lock_if_init (clib_spinlock_t * p)
 static_always_inline void
 clib_spinlock_unlock (clib_spinlock_t * p)
 {
-  (*p)->lock = 0;
-#if CLIB_DEBUG > 0
-  (*p)->frame_address = 0;
-  (*p)->pid = 0;
-  (*p)->cpu_index = 0;
-#endif
+  CLIB_LOCK_DBG_CLEAR (p);
+  /* Make sure all reads/writes are complete before releasing the lock */
+  clib_atomic_release (&(*p)->lock);
 }
 
 static_always_inline void
@@ -86,6 +113,83 @@ clib_spinlock_unlock_if_init (clib_spinlock_t * p)
     clib_spinlock_unlock (p);
 }
 
+/*
+ * Readers-Writer Lock
+ */
+
+typedef struct clib_rw_lock_
+{
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+  /* -1 when W lock held, > 0 when R lock held */
+  volatile i32 rw_cnt;
+#if CLIB_DEBUG > 0
+  pid_t pid;
+  uword thread_index;
+  void *frame_address;
+#endif
+} *clib_rwlock_t;
+
+always_inline void
+clib_rwlock_init (clib_rwlock_t * p)
+{
+  *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
+  clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
+}
+
+always_inline void
+clib_rwlock_free (clib_rwlock_t * p)
+{
+  if (*p)
+    {
+      clib_mem_free ((void *) *p);
+      *p = 0;
+    }
+}
+
+always_inline void
+clib_rwlock_reader_lock (clib_rwlock_t * p)
+{
+  i32 cnt;
+  do
+    {
+      /* rwlock held by a writer */
+      while ((cnt = clib_atomic_load_relax_n (&(*p)->rw_cnt)) < 0)
+	CLIB_PAUSE ();
+    }
+  while (!clib_atomic_cmp_and_swap_acq_relax_n
+	 (&(*p)->rw_cnt, &cnt, cnt + 1, 1));
+  CLIB_LOCK_DBG (p);
+}
+
+always_inline void
+clib_rwlock_reader_unlock (clib_rwlock_t * p)
+{
+  ASSERT ((*p)->rw_cnt > 0);
+  CLIB_LOCK_DBG_CLEAR (p);
+  clib_atomic_fetch_sub_rel (&(*p)->rw_cnt, 1);
+}
+
+always_inline void
+clib_rwlock_writer_lock (clib_rwlock_t * p)
+{
+  i32 cnt = 0;
+  do
+    {
+      /* rwlock held by writer or reader(s) */
+      while ((cnt = clib_atomic_load_relax_n (&(*p)->rw_cnt)) != 0)
+	CLIB_PAUSE ();
+    }
+  while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->rw_cnt, &cnt, -1, 1));
+  CLIB_LOCK_DBG (p);
+}
+
+always_inline void
+clib_rwlock_writer_unlock (clib_rwlock_t * p)
+{
+  CLIB_LOCK_DBG_CLEAR (p);
+  clib_atomic_release (&(*p)->rw_cnt);
+}
+
 #endif
 
 /*
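
Usage sketch (editor's addition, not part of the diff above): the spinlock API is
typically used by embedding the clib_spinlock_t handle next to the state it
protects. The shared_counter_t type and its functions below are hypothetical
illustrations; only the clib_spinlock_* calls come from lock.h.

#include <vppinfra/lock.h>

typedef struct
{
  clib_spinlock_t lock;		/* opaque handle; init allocates one aligned cache line */
  u64 counter;			/* shared state the lock protects */
} shared_counter_t;

static void
shared_counter_init (shared_counter_t * sc)
{
  clib_spinlock_init (&sc->lock);	/* allocate and zero the lock state */
  sc->counter = 0;
}

static void
shared_counter_bump (shared_counter_t * sc)
{
  clib_spinlock_lock (&sc->lock);	/* spins with CLIB_PAUSE while contended */
  sc->counter++;
  clib_spinlock_unlock (&sc->lock);	/* release-store of 0 publishes the increment */
}

The acquire CAS plus the relaxed-load inner loop is a test-and-test-and-set
pattern: contended waiters spin on a cached read of the lock word instead of
repeatedly executing compare_exchange, which reduces cache-line bouncing.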
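
A matching sketch for the readers-writer lock (also an editor's addition;
shared_value_t and its accessors are hypothetical): any number of readers may
hold the lock concurrently (rw_cnt > 0), while a writer waits for rw_cnt to
reach 0 and then owns it exclusively (rw_cnt == -1).

#include <vppinfra/lock.h>

typedef struct
{
  clib_rwlock_t rwlock;	/* handle; clib_rwlock_init allocates and zeroes rw_cnt */
  u32 value;		/* read-mostly shared state */
} shared_value_t;

static u32
shared_value_get (shared_value_t * sv)
{
  u32 v;
  clib_rwlock_reader_lock (&sv->rwlock);	/* increments rw_cnt; readers share */
  v = sv->value;
  clib_rwlock_reader_unlock (&sv->rwlock);	/* atomic decrement with release */
  return v;
}

static void
shared_value_set (shared_value_t * sv, u32 v)
{
  clib_rwlock_writer_lock (&sv->rwlock);	/* waits for rw_cnt == 0, then sets -1 */
  sv->value = v;
  clib_rwlock_writer_unlock (&sv->rwlock);	/* release-store of 0 reopens the lock */
}

Note the implementation has no writer preference: a steady stream of readers
can keep rw_cnt above zero and starve clib_rwlock_writer_lock, so it suits
read-mostly data with infrequent updates.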