2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
/* Include guard for the vppinfra lock primitives header. */
16 #ifndef included_clib_lock_h
17 #define included_clib_lock_h
19 #include <vppinfra/clib.h>
20 #include <vppinfra/atomics.h>
/* CLIB_PAUSE emits a CPU spin-wait hint for busy-wait loops:
   PAUSE on x86 (__builtin_ia32_pause), YIELD on ARM/AArch64.
   NOTE(review): the leading #if for the x86 branch and the trailing
   #else/#endif are not visible in this chunk — confirm against full source. */
23 #define CLIB_PAUSE() __builtin_ia32_pause ()
24 #elif defined (__aarch64__) || defined (__arm__)
25 #define CLIB_PAUSE() __asm__ ("yield")
/* Debug bookkeeping for lock holders: CLIB_LOCK_DBG records the acquiring
   context (frame address, pid, thread index) into the lock structure, and
   CLIB_LOCK_DBG_CLEAR wipes it on release.  Intended to be expanded only
   while holding the lock.
   NOTE(review): the surrounding #if/#else/#endif (debug vs. release build
   selection) and the do/while wrappers are not visible in this chunk; the
   second pair of definitions below is the release-build no-op variant. */
31 #define CLIB_LOCK_DBG(_p) \
33 (*_p)->frame_address = __builtin_frame_address (0); \
34 (*_p)->pid = getpid (); \
35 (*_p)->thread_index = os_get_thread_index (); \
37 #define CLIB_LOCK_DBG_CLEAR(_p) \
39 (*_p)->frame_address = 0; \
41 (*_p)->thread_index = 0; \
44 #define CLIB_LOCK_DBG(_p)
45 #define CLIB_LOCK_DBG_CLEAR(_p)
/* Plain (non-atomic) read of the lock word — suitable for assertions and
   heuristic checks only, not for synchronization decisions on its own. */
48 #define CLIB_SPINLOCK_IS_LOCKED(_p) (*(_p))->lock
49 #define CLIB_SPINLOCK_ASSERT_LOCKED(_p) ASSERT(CLIB_SPINLOCK_IS_LOCKED((_p)))
/* Align the spinlock structure to its own cache line so contended lock
   traffic does not false-share with neighboring data.
   NOTE(review): the enclosing typedef struct and its members are not
   visible in this chunk. */
53 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
/* Allocate a cache-line-aligned, zero-initialized spinlock and store it in
   *p.  Ownership: the allocation is released by clib_spinlock_free.
   NOTE(review): the function's return-type line and braces are not visible
   in this chunk. */
63 clib_spinlock_init (clib_spinlock_t * p)
65 *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
66 clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
/* Free the spinlock storage allocated by clib_spinlock_init.
   NOTE(review): lines between the signature and the free call are not
   visible here — presumably a NULL check guards the free and *p is reset
   afterwards; confirm against the full source. */
70 clib_spinlock_free (clib_spinlock_t * p)
74 clib_mem_free ((void *) *p);
/* Acquire the spinlock (blocking).  Test-and-test-and-set shape: the
   acquire CAS attempts 0 -> 1; while it fails, the inner relaxed-load loop
   spins read-only so the cache line stays shared and compare_exchange
   traffic is limited.
   NOTE(review): the declaration of `free`, the CLIB_PAUSE in the inner
   loop, and the CLIB_LOCK_DBG call are not visible in this chunk. */
79 static_always_inline void
80 clib_spinlock_lock (clib_spinlock_t * p)
83 while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->lock, &free, 1, 0))
85 /* atomic load limits number of compare_exchange executions */
86 while (clib_atomic_load_relax_n (&(*p)->lock))
88 /* on failure, compare_exchange writes (*p)->lock into free */
/* Non-blocking acquire attempt: bail out early if the lock already looks
   held (cheap non-atomic peek), otherwise take it via clib_spinlock_lock.
   NOTE(review): the return statements (failure/success paths) are not
   visible in this chunk — confirm the success/failure return values
   against the full source. */
94 static_always_inline int
95 clib_spinlock_trylock (clib_spinlock_t * p)
97 if (PREDICT_FALSE (CLIB_SPINLOCK_IS_LOCKED (p)))
99 clib_spinlock_lock (p);
/* Lock only if the spinlock was actually initialized (*p non-NULL).
   Lets callers make locking optional, e.g. when running single-threaded. */
103 static_always_inline void
104 clib_spinlock_lock_if_init (clib_spinlock_t * p)
106 if (PREDICT_FALSE (*p != 0))
107 clib_spinlock_lock (p);
/* Trylock only if the spinlock was initialized (*p non-NULL).
   NOTE(review): the return value for the uninitialized case is not visible
   in this chunk — confirm against the full source. */
110 static_always_inline int
111 clib_spinlock_trylock_if_init (clib_spinlock_t * p)
113 if (PREDICT_FALSE (*p != 0))
114 return clib_spinlock_trylock (p);
/* Release the spinlock.  Debug holder info is cleared first (still inside
   the critical section), then the lock word is stored with release
   semantics so all writes made under the lock are visible to the next
   acquirer. */
118 static_always_inline void
119 clib_spinlock_unlock (clib_spinlock_t * p)
121 CLIB_LOCK_DBG_CLEAR (p);
122 /* Make sure all reads/writes are complete before releasing the lock */
123 clib_atomic_release (&(*p)->lock);
/* Unlock only if the spinlock was initialized (*p non-NULL) — the
   counterpart to clib_spinlock_lock_if_init. */
126 static_always_inline void
127 clib_spinlock_unlock_if_init (clib_spinlock_t * p)
129 if (PREDICT_FALSE (*p != 0))
130 clib_spinlock_unlock (p);
134 * Readers-Writer Lock
137 typedef struct clib_rw_lock_
139 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
140 /* -1 when W lock held, > 0 when R lock held */
/* NOTE(review): the rw_cnt member declaration, debug fields, and the
   struct/typedef closer are not visible in this chunk; the comment above
   documents the rw_cnt counter's encoding (writer = -1, readers = count). */
/* Allocate a cache-line-aligned, zero-initialized readers-writer lock and
   store it in *p (rw_cnt starts at 0 = unlocked).  Ownership: freed by
   clib_rwlock_free.
   NOTE(review): the return-type line and braces are not visible here. */
150 clib_rwlock_init (clib_rwlock_t * p)
152 *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
153 clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
/* Free the rwlock storage allocated by clib_rwlock_init.
   NOTE(review): lines between the signature and the free call are not
   visible here — presumably a NULL check and pointer reset; confirm
   against the full source. */
157 clib_rwlock_free (clib_rwlock_t * p)
161 clib_mem_free ((void *) *p);
/* Acquire a shared (reader) lock: wait while a writer holds the lock
   (rw_cnt < 0), then CAS rw_cnt -> rw_cnt + 1 to register this reader.
   The CAS retries from a fresh snapshot whenever rw_cnt changed underneath
   (the failed compare_exchange reloads cnt).
   NOTE(review): the enclosing loop structure, `cnt` declaration, and the
   CLIB_LOCK_DBG call are not fully visible in this chunk. */
167 clib_rwlock_reader_lock (clib_rwlock_t * p)
172 /* rwlock held by a writer */
173 while ((cnt = clib_atomic_load_relax_n (&(*p)->rw_cnt)) < 0)
176 while (!clib_atomic_cmp_and_swap_acq_relax_n
177 (&(*p)->rw_cnt, &cnt, cnt + 1, 1));
/* Release a shared (reader) lock: assert at least one reader is
   registered, clear debug holder info, then decrement the reader count
   with release semantics so the reader's accesses complete first. */
182 clib_rwlock_reader_unlock (clib_rwlock_t * p)
184 ASSERT ((*p)->rw_cnt > 0);
185 CLIB_LOCK_DBG_CLEAR (p);
186 clib_atomic_fetch_sub_rel (&(*p)->rw_cnt, 1);
/* Acquire the exclusive (writer) lock: wait until no writer and no
   readers hold the lock (rw_cnt == 0), then CAS rw_cnt 0 -> -1 to mark
   writer ownership.  Retries from a fresh snapshot on CAS failure.
   NOTE(review): the enclosing loop structure, `cnt` declaration, and the
   CLIB_LOCK_DBG call are not fully visible in this chunk. */
190 clib_rwlock_writer_lock (clib_rwlock_t * p)
195 /* rwlock held by writer or reader(s) */
196 while ((cnt = clib_atomic_load_relax_n (&(*p)->rw_cnt)) != 0)
199 while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->rw_cnt, &cnt, -1, 1));
/* Release the exclusive (writer) lock: clear debug holder info, then
   store 0 into rw_cnt with release semantics so the writer's updates are
   visible to subsequent readers/writers. */
204 clib_rwlock_writer_unlock (clib_rwlock_t * p)
206 CLIB_LOCK_DBG_CLEAR (p);
207 clib_atomic_release (&(*p)->rw_cnt);
213 * fd.io coding-style-patch-verification: ON
216 * eval: (c-set-style "gnu")