/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #ifndef included_clib_lock_h
17 #define included_clib_lock_h
19 #include <vppinfra/clib.h>
22 #define CLIB_PAUSE() __builtin_ia32_pause ()
/* Debug builds record who holds a lock (frame address, pid, thread index)
 * so a stuck lock can be attributed.  Release builds compile to nothing.
 * do/while(0) makes each macro a single statement so it is safe after a
 * bare if/else. */
#if CLIB_DEBUG > 0
#define CLIB_LOCK_DBG(_p)				\
do {							\
    (*_p)->frame_address = __builtin_frame_address (0);	\
    (*_p)->pid = getpid ();				\
    (*_p)->thread_index = os_get_thread_index ();	\
} while (0)
#define CLIB_LOCK_DBG_CLEAR(_p)				\
do {							\
    (*_p)->frame_address = 0;				\
    (*_p)->pid = 0;					\
    (*_p)->thread_index = 0;				\
} while (0)
#else
#define CLIB_LOCK_DBG(_p)
#define CLIB_LOCK_DBG_CLEAR(_p)
#endif
47 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
57 clib_spinlock_init (clib_spinlock_t * p)
59 *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
60 clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
64 clib_spinlock_free (clib_spinlock_t * p)
68 clib_mem_free ((void *) *p);
73 static_always_inline void
74 clib_spinlock_lock (clib_spinlock_t * p)
76 while (clib_atomic_test_and_set (&(*p)->lock))
81 static_always_inline void
82 clib_spinlock_lock_if_init (clib_spinlock_t * p)
84 if (PREDICT_FALSE (*p != 0))
85 clib_spinlock_lock (p);
88 static_always_inline void
89 clib_spinlock_unlock (clib_spinlock_t * p)
91 CLIB_LOCK_DBG_CLEAR (p);
92 /* Make sure all writes are complete before releasing the lock */
93 CLIB_MEMORY_BARRIER ();
97 static_always_inline void
98 clib_spinlock_unlock_if_init (clib_spinlock_t * p)
100 if (PREDICT_FALSE (*p != 0))
101 clib_spinlock_unlock (p);
/*
 * Readers-Writer Lock
 */
108 typedef struct clib_rw_lock_
110 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
111 volatile u32 n_readers;
112 volatile u32 n_readers_lock;
113 volatile u32 writer_lock;
122 clib_rwlock_init (clib_rwlock_t * p)
124 *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
125 clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
129 clib_rwlock_free (clib_rwlock_t * p)
133 clib_mem_free ((void *) *p);
139 clib_rwlock_reader_lock (clib_rwlock_t * p)
141 while (clib_atomic_test_and_set (&(*p)->n_readers_lock))
144 (*p)->n_readers += 1;
145 if ((*p)->n_readers == 1)
147 while (clib_atomic_test_and_set (&(*p)->writer_lock))
150 CLIB_MEMORY_BARRIER ();
151 (*p)->n_readers_lock = 0;
157 clib_rwlock_reader_unlock (clib_rwlock_t * p)
159 ASSERT ((*p)->n_readers > 0);
160 CLIB_LOCK_DBG_CLEAR (p);
162 while (clib_atomic_test_and_set (&(*p)->n_readers_lock))
165 (*p)->n_readers -= 1;
166 if ((*p)->n_readers == 0)
168 CLIB_MEMORY_BARRIER ();
169 (*p)->writer_lock = 0;
172 CLIB_MEMORY_BARRIER ();
173 (*p)->n_readers_lock = 0;
177 clib_rwlock_writer_lock (clib_rwlock_t * p)
179 while (clib_atomic_test_and_set (&(*p)->writer_lock))
185 clib_rwlock_writer_unlock (clib_rwlock_t * p)
187 CLIB_LOCK_DBG_CLEAR (p);
188 CLIB_MEMORY_BARRIER ();
189 (*p)->writer_lock = 0;
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */