/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #ifndef included_clib_lock_h
17 #define included_clib_lock_h
19 #include <vppinfra/clib.h>
20 #include <vppinfra/atomics.h>
23 #define CLIB_PAUSE() __builtin_ia32_pause ()
/* Lock-owner bookkeeping, compiled in only for debug images.  Records who
 * holds the lock (frame address, pid, thread index) on acquire and clears
 * the record on release.  The do { } while (0) wrapper makes each macro a
 * single statement, safe in unbraced if/else bodies. */
#if CLIB_DEBUG > 0
#define CLIB_LOCK_DBG(_p)                                 \
do {                                                      \
    (*_p)->frame_address = __builtin_frame_address (0);   \
    (*_p)->pid = getpid ();                               \
    (*_p)->thread_index = os_get_thread_index ();         \
} while (0)
#define CLIB_LOCK_DBG_CLEAR(_p)                           \
do {                                                      \
    (*_p)->frame_address = 0;                             \
    (*_p)->pid = 0;                                       \
    (*_p)->thread_index = 0;                              \
} while (0)
#else
/* Release images: no-ops, zero overhead. */
#define CLIB_LOCK_DBG(_p)
#define CLIB_LOCK_DBG_CLEAR(_p)
#endif
46 #define CLIB_SPINLOCK_IS_LOCKED(_p) (*(_p))->lock
47 #define CLIB_SPINLOCK_ASSERT_LOCKED(_p) ASSERT(CLIB_SPINLOCK_IS_LOCKED((_p)))
51 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
61 clib_spinlock_init (clib_spinlock_t * p)
63 *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
64 clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
68 clib_spinlock_free (clib_spinlock_t * p)
72 clib_mem_free ((void *) *p);
77 static_always_inline void
78 clib_spinlock_lock (clib_spinlock_t * p)
81 while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->lock, &free, 1, 0))
83 /* atomic load limits number of compare_exchange executions */
84 while (clib_atomic_load_relax_n (&(*p)->lock))
86 /* on failure, compare_exchange writes (*p)->lock into free */
92 static_always_inline void
93 clib_spinlock_lock_if_init (clib_spinlock_t * p)
95 if (PREDICT_FALSE (*p != 0))
96 clib_spinlock_lock (p);
99 static_always_inline void
100 clib_spinlock_unlock (clib_spinlock_t * p)
102 CLIB_LOCK_DBG_CLEAR (p);
103 /* Make sure all reads/writes are complete before releasing the lock */
104 clib_atomic_release (&(*p)->lock);
107 static_always_inline void
108 clib_spinlock_unlock_if_init (clib_spinlock_t * p)
110 if (PREDICT_FALSE (*p != 0))
111 clib_spinlock_unlock (p);
/*
 * Readers-Writer Lock
 */
118 typedef struct clib_rw_lock_
120 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
121 /* -1 when W lock held, > 0 when R lock held */
131 clib_rwlock_init (clib_rwlock_t * p)
133 *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
134 clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
138 clib_rwlock_free (clib_rwlock_t * p)
142 clib_mem_free ((void *) *p);
148 clib_rwlock_reader_lock (clib_rwlock_t * p)
153 /* rwlock held by a writer */
154 while ((cnt = clib_atomic_load_relax_n (&(*p)->rw_cnt)) < 0)
157 while (!clib_atomic_cmp_and_swap_acq_relax_n
158 (&(*p)->rw_cnt, &cnt, cnt + 1, 1));
163 clib_rwlock_reader_unlock (clib_rwlock_t * p)
165 ASSERT ((*p)->rw_cnt > 0);
166 CLIB_LOCK_DBG_CLEAR (p);
167 clib_atomic_fetch_sub_rel (&(*p)->rw_cnt, 1);
171 clib_rwlock_writer_lock (clib_rwlock_t * p)
176 /* rwlock held by writer or reader(s) */
177 while ((cnt = clib_atomic_load_relax_n (&(*p)->rw_cnt)) != 0)
180 while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->rw_cnt, &cnt, -1, 1));
185 clib_rwlock_writer_unlock (clib_rwlock_t * p)
187 CLIB_LOCK_DBG_CLEAR (p);
188 clib_atomic_release (&(*p)->rw_cnt);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */