/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #ifndef included_clib_lock_h
17 #define included_clib_lock_h
19 #include <vppinfra/clib.h>
/* Spin-loop hint: de-pipelines the spin and reduces power/contention on x86.
 * On other architectures it is a no-op (the loop still makes progress). */
#if __x86_64__
#define CLIB_PAUSE() __builtin_ia32_pause ()
#else
#define CLIB_PAUSE()
#endif
/* Debug-build bookkeeping: record who holds a lock (frame address, pid,
 * thread index) on acquire, and clear it on release.  Compiles to nothing
 * in non-debug builds.  Multi-statement bodies are wrapped in
 * do { ... } while (0) so the macros behave as single statements. */
#if CLIB_DEBUG > 0
#define CLIB_LOCK_DBG(_p)				\
do {							\
    (*_p)->frame_address = __builtin_frame_address (0); \
    (*_p)->pid = getpid ();				\
    (*_p)->thread_index = os_get_thread_index ();	\
} while (0)
#define CLIB_LOCK_DBG_CLEAR(_p)				\
do {							\
    (*_p)->frame_address = 0;				\
    (*_p)->pid = 0;					\
    (*_p)->thread_index = 0;				\
} while (0)
#else
#define CLIB_LOCK_DBG(_p)
#define CLIB_LOCK_DBG_CLEAR(_p)
#endif
/* Read the lock word of a clib_spinlock_t (non-atomic peek, debug/assert use). */
#define CLIB_SPINLOCK_IS_LOCKED(_p) (*(_p))->lock
#define CLIB_SPINLOCK_ASSERT_LOCKED(_p) ASSERT(CLIB_SPINLOCK_IS_LOCKED((_p)))
50 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
60 clib_spinlock_init (clib_spinlock_t * p)
62 *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
63 clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
67 clib_spinlock_free (clib_spinlock_t * p)
71 clib_mem_free ((void *) *p);
76 static_always_inline void
77 clib_spinlock_lock (clib_spinlock_t * p)
79 while (clib_atomic_test_and_set (&(*p)->lock))
84 static_always_inline void
85 clib_spinlock_lock_if_init (clib_spinlock_t * p)
87 if (PREDICT_FALSE (*p != 0))
88 clib_spinlock_lock (p);
91 static_always_inline void
92 clib_spinlock_unlock (clib_spinlock_t * p)
94 CLIB_LOCK_DBG_CLEAR (p);
95 /* Make sure all reads/writes are complete before releasing the lock */
96 clib_atomic_release (&(*p)->lock);
99 static_always_inline void
100 clib_spinlock_unlock_if_init (clib_spinlock_t * p)
102 if (PREDICT_FALSE (*p != 0))
103 clib_spinlock_unlock (p);
/*
 * Readers-Writer Lock
 */
110 typedef struct clib_rw_lock_
112 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
113 volatile u32 n_readers;
114 volatile u32 n_readers_lock;
115 volatile u32 writer_lock;
124 clib_rwlock_init (clib_rwlock_t * p)
126 *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
127 clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
131 clib_rwlock_free (clib_rwlock_t * p)
135 clib_mem_free ((void *) *p);
141 clib_rwlock_reader_lock (clib_rwlock_t * p)
143 while (clib_atomic_test_and_set (&(*p)->n_readers_lock))
146 (*p)->n_readers += 1;
147 if ((*p)->n_readers == 1)
149 while (clib_atomic_test_and_set (&(*p)->writer_lock))
152 clib_atomic_release (&(*p)->n_readers_lock);
157 clib_rwlock_reader_unlock (clib_rwlock_t * p)
159 ASSERT ((*p)->n_readers > 0);
160 CLIB_LOCK_DBG_CLEAR (p);
162 while (clib_atomic_test_and_set (&(*p)->n_readers_lock))
165 (*p)->n_readers -= 1;
166 if ((*p)->n_readers == 0)
168 clib_atomic_release (&(*p)->writer_lock);
170 clib_atomic_release (&(*p)->n_readers_lock);
174 clib_rwlock_writer_lock (clib_rwlock_t * p)
176 while (clib_atomic_test_and_set (&(*p)->writer_lock))
182 clib_rwlock_writer_unlock (clib_rwlock_t * p)
184 CLIB_LOCK_DBG_CLEAR (p);
185 clib_atomic_release (&(*p)->writer_lock);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */