vppinfra: refactor clib_spinlock_t to use compare and swap
author    jaszha03 <jason.zhang2@arm.com>
          Wed, 26 Jun 2019 22:06:59 +0000 (17:06 -0500)
committer Dave Barach <openvpp@barachs.net>
          Thu, 1 Aug 2019 11:54:17 +0000 (11:54 +0000)
Tested the performance of a CAS implementation (using
__atomic_compare_exchange) against a TAS implementation (using
__atomic_exchange) with test_spinlock.c and found a consistent
performance improvement for CAS.

Generated assembly for the CAS and TAS implementations shows that TAS
always executes a load-store pair, whereas CAS places a branch between
the load and the store, so that only a load occurs while the lock is
held by another thread.
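
For illustration, the two acquire primitives can be sketched with the
GCC/Clang builtins. This is a hedged sketch, not code from the patch;
try_lock_tas and try_lock_cas are hypothetical names:

  #include <stdint.h>

  typedef uint32_t u32;

  /* TAS: an unconditional read-modify-write, so every attempt
     performs a store even while the lock is already held. */
  static inline int
  try_lock_tas (volatile u32 * lock)
  {
    return __atomic_exchange_n (lock, 1, __ATOMIC_ACQUIRE) == 0;
  }

  /* CAS: the store is conditional on having observed 0, so a busy
     lock costs only a load on the failing path. */
  static inline int
  try_lock_cas (volatile u32 * lock)
  {
    u32 free = 0;
    return __atomic_compare_exchange_n (lock, &free, 1, 0 /* strong */,
                                        __ATOMIC_ACQUIRE,
                                        __ATOMIC_RELAXED);
  }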

Benchmarking:
The results below are cycle counts from test_spinlock.c, configured so
that for 10000 iterations, 12 threads on separate cores are spawned,
each of which increments a global counter 10000 times per iteration.
For the A72, 8 threads are spawned in each test. (A hedged sketch of
the benchmark loop follows the results.)

x86 Xeon TAS: 7.333e8, 7.605e8, 7.535e8, 7.485e8, 7.321e8
x86 Xeon CAS: 5.842e8, 5.433e8, 5.389e8, 5.983e8, 5.552e8
Aarch64 ThX2* TAS: 9.852e7, 10.209e7, 9.190e7, 9.600e7, 9.224e7
Aarch64 ThX2* CAS: 7.640e7, 7.486e7, 7.425e7, 7.269e7, 7.534e7
A72 TAS: 7.289e6, 6.963e6, 7.208e6, 6.976e6, 7.200e6
A72 CAS: 1.695e6, 1.608e6, 1.600e6, 1.634e6, 1.746e6

*ThunderX2 used additional gcc options "-march=armv8.1-a+crc+crypto+lse"
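
For reference, a minimal sketch of the benchmark loop described above,
assuming pthreads and the vppinfra spinlock API; the loop structure and
all names here are assumptions, not the actual test_spinlock.c source:

  #include <pthread.h>
  #include <vppinfra/lock.h>

  #define N_THREADS 12      /* 8 on A72 */
  #define N_INCR    10000

  static clib_spinlock_t lock;  /* set up once via clib_spinlock_init */
  static u64 counter;

  static void *
  worker (void *arg)
  {
    int i;
    for (i = 0; i < N_INCR; i++)
      {
        clib_spinlock_lock (&lock);
        counter++;                    /* contended critical section */
        clib_spinlock_unlock (&lock);
      }
    return 0;
  }

  /* The harness repeats this 10000 times: spawn N_THREADS workers
     with pthread_create, join them, and accumulate elapsed cycles. */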

Type: refactor

Change-Id: Ic5cd97991804f6b012707fad1a5d1a6edb96cd3d
Signed-off-by: Jason Zhang <jason.zhang2@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Lijian Zhang <Lijian.Zhang@arm.com>
src/vppinfra/atomics.h
src/vppinfra/lock.h

diff --git a/src/vppinfra/atomics.h b/src/vppinfra/atomics.h
index df8e534..ce2177d 100644
 #define clib_atomic_cmp_and_swap(addr,old,new) __sync_val_compare_and_swap(addr, old, new)
 #define clib_atomic_bool_cmp_and_swap(addr,old,new) __sync_bool_compare_and_swap(addr, old, new)
 
+#define clib_atomic_cmp_and_swap_acq_relax_n(addr,exp,new,weak) __atomic_compare_exchange_n ((addr), (exp), (new), (weak), __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
+
 #define clib_atomic_test_and_set(a) __atomic_exchange_n(a, 1, __ATOMIC_ACQUIRE)
 #define clib_atomic_release(a) __atomic_store_n(a, 0, __ATOMIC_RELEASE)
 
 #define clib_atomic_fence_rel() __atomic_thread_fence(__ATOMIC_RELEASE);
 
+#define clib_atomic_load_relax_n(a) __atomic_load_n((a), __ATOMIC_RELAXED)
 #define clib_atomic_load_acq_n(a) __atomic_load_n((a), __ATOMIC_ACQUIRE)
 #define clib_atomic_store_rel_n(a, b) __atomic_store_n ((a), (b), __ATOMIC_RELEASE)
 
diff --git a/src/vppinfra/lock.h b/src/vppinfra/lock.h
index cc6a7f0..c1f0b87 100644
@@ -17,6 +17,7 @@
 #define included_clib_lock_h
 
 #include <vppinfra/clib.h>
+#include <vppinfra/atomics.h>
 
 #if __x86_64__
 #define CLIB_PAUSE() __builtin_ia32_pause ()
@@ -76,8 +77,15 @@ clib_spinlock_free (clib_spinlock_t * p)
 static_always_inline void
 clib_spinlock_lock (clib_spinlock_t * p)
 {
-  while (clib_atomic_test_and_set (&(*p)->lock))
-    CLIB_PAUSE ();
+  u32 free = 0;
+  while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->lock, &free, 1, 0))
+    {
+      /* atomic load limits number of compare_exchange executions */
+      while (clib_atomic_load_relax_n (&(*p)->lock))
+       CLIB_PAUSE ();
+      /* on failure, compare_exchange writes (*p)->lock into free */
+      free = 0;
+    }
   CLIB_LOCK_DBG (p);
 }
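
For context (unchanged by this patch), the release side pairs with the
ACQUIRE ordering above; a sketch along the lines of the existing
clib_spinlock_unlock in lock.h:

  static_always_inline void
  clib_spinlock_unlock (clib_spinlock_t * p)
  {
    CLIB_LOCK_DBG_CLEAR (p);
    /* RELEASE store of 0: publishes the critical section's writes
       before another thread's ACQUIRE CAS can observe the lock free. */
    clib_atomic_release (&(*p)->lock);
  }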