vppinfra: add os_get_online_cpu_core() and os_get_online_cpu_node()
[vpp.git] / src / vppinfra / lock.h
index 49e849b..b7b3d00 100644 (file)
@@ -21,6 +21,8 @@
 
 #if __x86_64__
 #define CLIB_PAUSE() __builtin_ia32_pause ()
+#elif defined (__aarch64__) || defined (__arm__)
+#define CLIB_PAUSE() __asm__ ("yield")
 #else
 #define CLIB_PAUSE()
 #endif
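
Note: CLIB_PAUSE () is the hint emitted inside spin-wait loops; this hunk makes it expand to the arm "yield" instruction in addition to x86 PAUSE, and keeps it a no-op elsewhere. A minimal sketch of the kind of loop the macro targets is below; the helper name wait_until_clear is illustrative, not part of this file.

/* Illustrative only: a busy-wait loop built on CLIB_PAUSE ().
   On x86 this emits PAUSE, on aarch64/arm it now emits YIELD,
   and on other architectures it compiles to nothing. */
static inline void
wait_until_clear (volatile u32 * flag)
{
  while (*flag)
    CLIB_PAUSE ();
}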
@@ -46,7 +48,7 @@ do {                                                  \
 #define CLIB_SPINLOCK_IS_LOCKED(_p) (*(_p))->lock
 #define CLIB_SPINLOCK_ASSERT_LOCKED(_p) ASSERT(CLIB_SPINLOCK_IS_LOCKED((_p)))
 
-typedef struct
+struct clib_spinlock_s
 {
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
   u32 lock;
@@ -55,7 +57,9 @@ typedef struct
   uword thread_index;
   void *frame_address;
 #endif
-} *clib_spinlock_t;
+};
+
+typedef struct clib_spinlock_s *clib_spinlock_t;
 
 static inline void
 clib_spinlock_init (clib_spinlock_t * p)
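
Note: these two hunks replace the anonymous struct typedef with a named struct clib_spinlock_s while keeping the pointer typedef, so other headers can hold a spinlock pointer via a forward declaration instead of including lock.h. A hypothetical consumer header, assuming a made-up my_table_t type:

/* Illustrative only: forward declaration now possible without lock.h. */
struct clib_spinlock_s;

typedef struct my_table_
{
  struct clib_spinlock_s *lock;	/* hypothetical member */
} my_table_t;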
@@ -89,6 +93,15 @@ clib_spinlock_lock (clib_spinlock_t * p)
   CLIB_LOCK_DBG (p);
 }
 
+static_always_inline int
+clib_spinlock_trylock (clib_spinlock_t * p)
+{
+  if (PREDICT_FALSE (CLIB_SPINLOCK_IS_LOCKED (p)))
+    return 0;
+  clib_spinlock_lock (p);
+  return 1;
+}
+
 static_always_inline void
 clib_spinlock_lock_if_init (clib_spinlock_t * p)
 {
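
Note: clib_spinlock_trylock () returns 0 immediately when the lock is observed held, otherwise it acquires the lock and returns 1. The check and the acquisition are not one atomic step, so the clib_spinlock_lock () call may still spin briefly if another thread grabs the lock in between; the early check is a fast-path hint, not a guarantee of non-blocking behaviour. A hedged usage sketch, with my_lock, do_work_under_lock () and defer_work () as hypothetical names:

/* Illustrative caller, not from this patch: skip contended work. */
clib_spinlock_t my_lock;	/* assumed to have been clib_spinlock_init()ed */

if (clib_spinlock_trylock (&my_lock))
  {
    do_work_under_lock ();	/* hypothetical helper */
    clib_spinlock_unlock (&my_lock);
  }
else
  defer_work ();		/* hypothetical helper */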
@@ -96,6 +109,14 @@ clib_spinlock_lock_if_init (clib_spinlock_t * p)
     clib_spinlock_lock (p);
 }
 
+static_always_inline int
+clib_spinlock_trylock_if_init (clib_spinlock_t * p)
+{
+  if (PREDICT_FALSE (*p != 0))
+    return clib_spinlock_trylock (p);
+  return 1;
+}
+
 static_always_inline void
 clib_spinlock_unlock (clib_spinlock_t * p)
 {
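
Note: clib_spinlock_trylock_if_init () mirrors clib_spinlock_lock_if_init (): when the spinlock was never allocated (*p == 0) it reports success without touching anything, otherwise it defers to clib_spinlock_trylock (). A sketch under that assumption, using a hypothetical opt_lock and the matching clib_spinlock_unlock_if_init () defined later in this header:

/* Illustrative only: in a single-threaded configuration the lock may
   legitimately be left uninitialized, and the _if_init variant then
   degenerates to an unconditional success. */
clib_spinlock_t opt_lock = 0;	/* hypothetical, possibly never initialized */

if (clib_spinlock_trylock_if_init (&opt_lock))
  {
    /* ... critical section ... */
    clib_spinlock_unlock_if_init (&opt_lock);
  }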