/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #ifndef included_clib_lock_h
17 #define included_clib_lock_h
19 #include <vppinfra/clib.h>
20 #include <vppinfra/atomics.h>
23 #define CLIB_PAUSE() __builtin_ia32_pause ()
24 #elif defined (__aarch64__) || defined (__arm__)
25 #define CLIB_PAUSE() __asm__ ("yield")
/* Debug builds record who currently holds a lock (frame address, pid and
 * thread index of the taker); release builds compile these away.
 * NOTE(review): the `#if CLIB_DEBUG > 0` guard is reconstructed from a
 * garbled extraction — confirm the exact condition against upstream. */
#if CLIB_DEBUG > 0
#define CLIB_LOCK_DBG(_p)					\
do {								\
    (*_p)->frame_address = __builtin_frame_address (0);		\
    (*_p)->pid = getpid ();					\
    (*_p)->thread_index = os_get_thread_index ();		\
} while (0)
#define CLIB_LOCK_DBG_CLEAR(_p)					\
do {								\
    (*_p)->frame_address = 0;					\
    (*_p)->pid = 0;						\
    (*_p)->thread_index = 0;					\
} while (0)
#else
#define CLIB_LOCK_DBG(_p)
#define CLIB_LOCK_DBG_CLEAR(_p)
#endif
48 #define CLIB_SPINLOCK_IS_LOCKED(_p) (*(_p))->lock
49 #define CLIB_SPINLOCK_ASSERT_LOCKED(_p) ASSERT(CLIB_SPINLOCK_IS_LOCKED((_p)))
53 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
63 clib_spinlock_init (clib_spinlock_t * p)
65 *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
66 clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
70 clib_spinlock_free (clib_spinlock_t * p)
74 clib_mem_free ((void *) *p);
79 static_always_inline void
80 clib_spinlock_lock (clib_spinlock_t * p)
83 while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->lock, &free, 1, 0))
85 /* atomic load limits number of compare_exchange executions */
86 while (clib_atomic_load_relax_n (&(*p)->lock))
88 /* on failure, compare_exchange writes (*p)->lock into free */
94 static_always_inline void
95 clib_spinlock_lock_if_init (clib_spinlock_t * p)
97 if (PREDICT_FALSE (*p != 0))
98 clib_spinlock_lock (p);
101 static_always_inline void
102 clib_spinlock_unlock (clib_spinlock_t * p)
104 CLIB_LOCK_DBG_CLEAR (p);
105 /* Make sure all reads/writes are complete before releasing the lock */
106 clib_atomic_release (&(*p)->lock);
109 static_always_inline void
110 clib_spinlock_unlock_if_init (clib_spinlock_t * p)
112 if (PREDICT_FALSE (*p != 0))
113 clib_spinlock_unlock (p);
117 * Readers-Writer Lock
120 typedef struct clib_rw_lock_
122 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
123 /* -1 when W lock held, > 0 when R lock held */
133 clib_rwlock_init (clib_rwlock_t * p)
135 *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
136 clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
140 clib_rwlock_free (clib_rwlock_t * p)
144 clib_mem_free ((void *) *p);
150 clib_rwlock_reader_lock (clib_rwlock_t * p)
155 /* rwlock held by a writer */
156 while ((cnt = clib_atomic_load_relax_n (&(*p)->rw_cnt)) < 0)
159 while (!clib_atomic_cmp_and_swap_acq_relax_n
160 (&(*p)->rw_cnt, &cnt, cnt + 1, 1));
165 clib_rwlock_reader_unlock (clib_rwlock_t * p)
167 ASSERT ((*p)->rw_cnt > 0);
168 CLIB_LOCK_DBG_CLEAR (p);
169 clib_atomic_fetch_sub_rel (&(*p)->rw_cnt, 1);
173 clib_rwlock_writer_lock (clib_rwlock_t * p)
178 /* rwlock held by writer or reader(s) */
179 while ((cnt = clib_atomic_load_relax_n (&(*p)->rw_cnt)) != 0)
182 while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->rw_cnt, &cnt, -1, 1));
187 clib_rwlock_writer_unlock (clib_rwlock_t * p)
189 CLIB_LOCK_DBG_CLEAR (p);
190 clib_atomic_release (&(*p)->rw_cnt);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */