/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef included_clib_lock_h
#define included_clib_lock_h

#include <vppinfra/clib.h>
#include <vppinfra/atomics.h>
#if __x86_64__
#define CLIB_PAUSE() __builtin_ia32_pause ()
#else
#define CLIB_PAUSE()
#endif
#if CLIB_DEBUG > 0
#define CLIB_LOCK_DBG(_p)					\
do {								\
    (*_p)->frame_address = __builtin_frame_address (0);	\
    (*_p)->pid = getpid ();					\
    (*_p)->thread_index = os_get_thread_index ();		\
} while (0)
#define CLIB_LOCK_DBG_CLEAR(_p)					\
do {								\
    (*_p)->frame_address = 0;					\
    (*_p)->pid = 0;						\
    (*_p)->thread_index = 0;					\
} while (0)
#else
#define CLIB_LOCK_DBG(_p)
#define CLIB_LOCK_DBG_CLEAR(_p)
#endif
#define CLIB_SPINLOCK_IS_LOCKED(_p) (*(_p))->lock
#define CLIB_SPINLOCK_ASSERT_LOCKED(_p) ASSERT (CLIB_SPINLOCK_IS_LOCKED ((_p)))
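/*
 * Usage note: a function that must only run with the lock already held
 * can assert that precondition.  A minimal sketch (the my_main_t type
 * and mm pointer below are hypothetical, not part of this header):
 *
 *   static void
 *   my_table_update (my_main_t * mm)
 *   {
 *     CLIB_SPINLOCK_ASSERT_LOCKED (&mm->lock);
 *     ... mutate state protected by mm->lock ...
 *   }
 */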
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  u32 lock;
#if CLIB_DEBUG > 0
  pid_t pid;
  uword thread_index;
  void *frame_address;
#endif
} *clib_spinlock_t;
static inline void
clib_spinlock_init (clib_spinlock_t * p)
{
  *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
  clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
}
static inline void
clib_spinlock_free (clib_spinlock_t * p)
{
  if (*p)
    {
      clib_mem_free ((void *) *p);
      *p = 0;
    }
}
static_always_inline void
clib_spinlock_lock (clib_spinlock_t * p)
{
  u32 free = 0;
  while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->lock, &free, 1, 0))
    {
      /* atomic load limits number of compare_exchange executions */
      while (clib_atomic_load_relax_n (&(*p)->lock))
	CLIB_PAUSE ();
      /* on failure, compare_exchange writes (*p)->lock into free */
      free = 0;
    }
  CLIB_LOCK_DBG (p);
}
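/*
 * Design note: the loop above is the classic test-and-test-and-set
 * pattern.  The relaxed atomic load spins on a read of the (likely
 * locally cached) lock word and retries the more expensive
 * compare-exchange only once the lock appears free, which limits
 * cache-line bouncing between cores; CLIB_PAUSE () additionally hints
 * to the CPU that this is a spin-wait loop.
 */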
static_always_inline void
clib_spinlock_lock_if_init (clib_spinlock_t * p)
{
  if (PREDICT_FALSE (*p != 0))
    clib_spinlock_lock (p);
}
static_always_inline void
clib_spinlock_unlock (clib_spinlock_t * p)
{
  CLIB_LOCK_DBG_CLEAR (p);
  /* Make sure all reads/writes are complete before releasing the lock */
  clib_atomic_release (&(*p)->lock);
}
static_always_inline void
clib_spinlock_unlock_if_init (clib_spinlock_t * p)
{
  if (PREDICT_FALSE (*p != 0))
    clib_spinlock_unlock (p);
}
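/*
 * Spinlock usage sketch (illustrative only; my_main_t and its fields
 * are hypothetical):
 *
 *   typedef struct
 *   {
 *     clib_spinlock_t lock;
 *     u32 counter;
 *   } my_main_t;
 *
 *   clib_spinlock_init (&mm->lock);
 *   clib_spinlock_lock (&mm->lock);
 *   mm->counter += 1;
 *   clib_spinlock_unlock (&mm->lock);
 *   clib_spinlock_free (&mm->lock);
 *
 * The *_if_init variants act only when the lock was actually allocated,
 * so code paths shared between single- and multi-threaded
 * configurations can skip clib_spinlock_init and pay no locking cost.
 */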
/*
 * Readers-Writer Lock
 */
typedef struct clib_rw_lock_
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  volatile u32 n_readers;
  volatile u32 n_readers_lock;
  volatile u32 writer_lock;
#if CLIB_DEBUG > 0
  pid_t pid;
  uword thread_index;
  void *frame_address;
#endif
} *clib_rwlock_t;
always_inline void
clib_rwlock_init (clib_rwlock_t * p)
{
  *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
  clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
}
always_inline void
clib_rwlock_free (clib_rwlock_t * p)
{
  if (*p)
    {
      clib_mem_free ((void *) *p);
      *p = 0;
    }
}
always_inline void
clib_rwlock_reader_lock (clib_rwlock_t * p)
{
  while (clib_atomic_test_and_set (&(*p)->n_readers_lock))
    CLIB_PAUSE ();

  (*p)->n_readers += 1;
  if ((*p)->n_readers == 1)
    {
      /* first reader takes the writer lock on behalf of all readers */
      while (clib_atomic_test_and_set (&(*p)->writer_lock))
	CLIB_PAUSE ();
    }
  clib_atomic_release (&(*p)->n_readers_lock);

  CLIB_LOCK_DBG (p);
}
always_inline void
clib_rwlock_reader_unlock (clib_rwlock_t * p)
{
  ASSERT ((*p)->n_readers > 0);
  CLIB_LOCK_DBG_CLEAR (p);

  while (clib_atomic_test_and_set (&(*p)->n_readers_lock))
    CLIB_PAUSE ();

  (*p)->n_readers -= 1;
  if ((*p)->n_readers == 0)
    {
      /* last reader releases the writer lock */
      clib_atomic_release (&(*p)->writer_lock);
    }
  clib_atomic_release (&(*p)->n_readers_lock);
}
always_inline void
clib_rwlock_writer_lock (clib_rwlock_t * p)
{
  while (clib_atomic_test_and_set (&(*p)->writer_lock))
    CLIB_PAUSE ();
  CLIB_LOCK_DBG (p);
}
always_inline void
clib_rwlock_writer_unlock (clib_rwlock_t * p)
{
  CLIB_LOCK_DBG_CLEAR (p);
  clib_atomic_release (&(*p)->writer_lock);
}
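/*
 * Readers-writer lock usage sketch (illustrative; mm is a hypothetical
 * pointer to state containing a clib_rwlock_t).  Any number of readers
 * may hold the lock concurrently: the first reader in takes writer_lock
 * on behalf of all readers and the last reader out releases it, so a
 * writer waits until no readers remain:
 *
 *   clib_rwlock_init (&mm->rwlock);
 *
 *   clib_rwlock_reader_lock (&mm->rwlock);
 *   ... read shared state ...
 *   clib_rwlock_reader_unlock (&mm->rwlock);
 *
 *   clib_rwlock_writer_lock (&mm->rwlock);
 *   ... modify shared state ...
 *   clib_rwlock_writer_unlock (&mm->rwlock);
 */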
#endif /* included_clib_lock_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */