vppinfra: add bihash_init2
[vpp.git] / src / vppinfra / lock.h
1 /*
2  * Copyright (c) 2017 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #ifndef included_clib_lock_h
17 #define included_clib_lock_h
18
19 #include <vppinfra/clib.h>
20 #include <vppinfra/atomics.h>
21
/* CLIB_PAUSE: CPU hint used inside spin-wait loops to reduce power and
 * ease contention on the lock cache line.
 * - x86_64: the PAUSE instruction via the compiler built-in.
 * - aarch64/arm: the YIELD hint (previously this expanded to nothing,
 *   so ARM spinners busy-waited at full throttle).
 * - other targets: no-op. */
#if __x86_64__
#define CLIB_PAUSE() __builtin_ia32_pause ()
#elif defined (__aarch64__) || defined (__arm__)
#define CLIB_PAUSE() __asm__ ("yield")
#else
#define CLIB_PAUSE()
#endif
27
#if CLIB_DEBUG > 1
/* Record the acquiring context (caller's stack frame, pid, thread index)
 * in the lock structure so a held lock can be attributed to its owner
 * from a debugger or core dump.  _p is a pointer to the lock handle. */
#define CLIB_LOCK_DBG(_p)                               \
do {                                                    \
    (*_p)->frame_address = __builtin_frame_address (0); \
    (*_p)->pid = getpid ();                             \
    (*_p)->thread_index = os_get_thread_index ();       \
} while (0)
/* Clear the ownership record when the lock is released. */
#define CLIB_LOCK_DBG_CLEAR(_p)                         \
do {                                                    \
    (*_p)->frame_address = 0;                           \
    (*_p)->pid = 0;                                     \
    (*_p)->thread_index = 0;                            \
} while (0)
#else
/* Ownership tracking compiles away unless built with CLIB_DEBUG > 1. */
#define CLIB_LOCK_DBG(_p)
#define CLIB_LOCK_DBG_CLEAR(_p)
#endif
45
/* Plain (non-atomic) read of the lock word — intended for assertions and
 * diagnostics only, not for synchronization decisions. */
#define CLIB_SPINLOCK_IS_LOCKED(_p) (*(_p))->lock
#define CLIB_SPINLOCK_ASSERT_LOCKED(_p) ASSERT(CLIB_SPINLOCK_IS_LOCKED((_p)))
48
/* Heap-allocated, cache-line-aligned spinlock.  The public handle is a
 * pointer to this struct (allocated by clib_spinlock_init). */
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  /* 0 = free, 1 = held; accessed only through clib_atomic_* operations */
  u32 lock;
#if CLIB_DEBUG > 0
  /* Owner bookkeeping; note these fields exist at CLIB_DEBUG > 0 but are
   * only written by CLIB_LOCK_DBG when CLIB_DEBUG > 1. */
  pid_t pid;
  uword thread_index;
  void *frame_address;
#endif
} *clib_spinlock_t;
59
60 static inline void
61 clib_spinlock_init (clib_spinlock_t * p)
62 {
63   *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
64   clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
65 }
66
67 static inline void
68 clib_spinlock_free (clib_spinlock_t * p)
69 {
70   if (*p)
71     {
72       clib_mem_free ((void *) *p);
73       *p = 0;
74     }
75 }
76
/* Acquire the spinlock, spinning until it becomes available.
 * Acquire memory ordering on success; pairs with the release in
 * clib_spinlock_unlock. */
static_always_inline void
clib_spinlock_lock (clib_spinlock_t * p)
{
  u32 free = 0;
  /* Try to flip 0 -> 1 with an acquire CAS (relaxed on failure). */
  while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->lock, &free, 1, 0))
    {
      /* atomic load limits number of compare_exchange executions */
      while (clib_atomic_load_relax_n (&(*p)->lock))
	CLIB_PAUSE ();
      /* on failure, compare_exchange writes (*p)->lock into free */
      free = 0;
    }
  /* Record ownership (debug builds only). */
  CLIB_LOCK_DBG (p);
}
91
92 static_always_inline void
93 clib_spinlock_lock_if_init (clib_spinlock_t * p)
94 {
95   if (PREDICT_FALSE (*p != 0))
96     clib_spinlock_lock (p);
97 }
98
/* Release the spinlock.  The release store makes all writes performed
 * inside the critical section visible before the lock word is cleared. */
static_always_inline void
clib_spinlock_unlock (clib_spinlock_t * p)
{
  /* Drop the ownership record first (debug builds only). */
  CLIB_LOCK_DBG_CLEAR (p);
  /* Make sure all reads/writes are complete before releasing the lock */
  clib_atomic_release (&(*p)->lock);
}
106
107 static_always_inline void
108 clib_spinlock_unlock_if_init (clib_spinlock_t * p)
109 {
110   if (PREDICT_FALSE (*p != 0))
111     clib_spinlock_unlock (p);
112 }
113
114 /*
115  * Readers-Writer Lock
116  */
117
/* Heap-allocated, cache-line-aligned readers-writer lock.  Multiple
 * readers may hold it concurrently; a writer holds it exclusively. */
typedef struct clib_rw_lock_
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  /* -1 when W lock held, > 0 when R lock held */
  volatile i32 rw_cnt;
#if CLIB_DEBUG > 0
  /* Owner bookkeeping; written by CLIB_LOCK_DBG when CLIB_DEBUG > 1. */
  pid_t pid;
  uword thread_index;
  void *frame_address;
#endif
} *clib_rwlock_t;
129
130 always_inline void
131 clib_rwlock_init (clib_rwlock_t * p)
132 {
133   *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
134   clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
135 }
136
137 always_inline void
138 clib_rwlock_free (clib_rwlock_t * p)
139 {
140   if (*p)
141     {
142       clib_mem_free ((void *) *p);
143       *p = 0;
144     }
145 }
146
/* Take the lock for reading: atomically bump rw_cnt from any value >= 0.
 * Spins while a writer (rw_cnt == -1) holds the lock.  Weak acquire CAS;
 * on failure the CAS refreshes cnt and the outer loop retries. */
always_inline void
clib_rwlock_reader_lock (clib_rwlock_t * p)
{
  i32 cnt;
  do
    {
      /* rwlock held by a writer */
      while ((cnt = clib_atomic_load_relax_n (&(*p)->rw_cnt)) < 0)
	CLIB_PAUSE ();
    }
  while (!clib_atomic_cmp_and_swap_acq_relax_n
	 (&(*p)->rw_cnt, &cnt, cnt + 1, 1));
  /* Record ownership (debug builds only). */
  CLIB_LOCK_DBG (p);
}
161
/* Drop one reader reference.  Release ordering ensures the reader's
 * critical-section reads complete before the count is decremented. */
always_inline void
clib_rwlock_reader_unlock (clib_rwlock_t * p)
{
  /* Must currently be held by at least one reader. */
  ASSERT ((*p)->rw_cnt > 0);
  CLIB_LOCK_DBG_CLEAR (p);
  clib_atomic_fetch_sub_rel (&(*p)->rw_cnt, 1);
}
169
/* Take the lock for writing: atomically move rw_cnt from 0 (fully free)
 * to -1 (writer-held).  Spins while any reader or writer holds the lock.
 * Weak acquire CAS; on failure the CAS refreshes cnt and we retry. */
always_inline void
clib_rwlock_writer_lock (clib_rwlock_t * p)
{
  i32 cnt = 0;
  do
    {
      /* rwlock held by writer or reader(s) */
      while ((cnt = clib_atomic_load_relax_n (&(*p)->rw_cnt)) != 0)
	CLIB_PAUSE ();
    }
  while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->rw_cnt, &cnt, -1, 1));
  /* Record ownership (debug builds only). */
  CLIB_LOCK_DBG (p);
}
183
/* Release the writer lock: release-store rw_cnt back to 0 so the
 * writer's updates are visible before readers/writers can enter. */
always_inline void
clib_rwlock_writer_unlock (clib_rwlock_t * p)
{
  CLIB_LOCK_DBG_CLEAR (p);
  clib_atomic_release (&(*p)->rw_cnt);
}
190
191 #endif
192
193 /*
194  * fd.io coding-style-patch-verification: ON
195  *
196  * Local Variables:
197  * eval: (c-set-style "gnu")
198  * End:
199  */