src/vppinfra/lock.h
/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_clib_lock_h
#define included_clib_lock_h

#include <vppinfra/clib.h>
#include <vppinfra/atomics.h>

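/* Spin-wait hint: on x86_64 this emits the PAUSE instruction to reduce
 * power and pipeline pressure inside busy-wait loops; on other targets it
 * expands to nothing. */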
#if __x86_64__
#define CLIB_PAUSE() __builtin_ia32_pause ()
#else
#define CLIB_PAUSE()
#endif

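/* With CLIB_DEBUG > 1, each lock records the acquiring caller's frame
 * address, pid and thread index for debugging; the fields are cleared
 * again on unlock. */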
#if CLIB_DEBUG > 1
#define CLIB_LOCK_DBG(_p)                               \
do {                                                    \
    (*_p)->frame_address = __builtin_frame_address (0); \
    (*_p)->pid = getpid ();                             \
    (*_p)->thread_index = os_get_thread_index ();       \
} while (0)
#define CLIB_LOCK_DBG_CLEAR(_p)                         \
do {                                                    \
    (*_p)->frame_address = 0;                           \
    (*_p)->pid = 0;                                     \
    (*_p)->thread_index = 0;                            \
} while (0)
#else
#define CLIB_LOCK_DBG(_p)
#define CLIB_LOCK_DBG_CLEAR(_p)
#endif

#define CLIB_SPINLOCK_IS_LOCKED(_p) (*(_p))->lock
#define CLIB_SPINLOCK_ASSERT_LOCKED(_p) ASSERT(CLIB_SPINLOCK_IS_LOCKED((_p)))

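/* Spinlock: a single cache-line aligned word; the lock is held while
 * 'lock' is non-zero. The type is a pointer so locks can be allocated
 * on demand by clib_spinlock_init. */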
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  u32 lock;
#if CLIB_DEBUG > 0
  pid_t pid;
  uword thread_index;
  void *frame_address;
#endif
} *clib_spinlock_t;

static inline void
clib_spinlock_init (clib_spinlock_t * p)
{
  *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
  clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
}

static inline void
clib_spinlock_free (clib_spinlock_t * p)
{
  if (*p)
    {
      clib_mem_free ((void *) *p);
      *p = 0;
    }
}

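/* Acquire the spinlock. Test-and-test-and-set: attempt the acquiring
 * compare-and-swap, and while it fails spin on a relaxed atomic load so
 * the cache line is not continuously written while the lock is held. */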
static_always_inline void
clib_spinlock_lock (clib_spinlock_t * p)
{
  u32 free = 0;
  while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->lock, &free, 1, 0))
    {
      /* atomic load limits number of compare_exchange executions */
      while (clib_atomic_load_relax_n (&(*p)->lock))
	CLIB_PAUSE ();
      /* on failure, compare_exchange writes (*p)->lock into free */
      free = 0;
    }
  CLIB_LOCK_DBG (p);
}

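/* The *_if_init variants lock/unlock only when the spinlock has actually
 * been initialized (pointer is non-NULL); otherwise they are no-ops. */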
static_always_inline void
clib_spinlock_lock_if_init (clib_spinlock_t * p)
{
  if (PREDICT_FALSE (*p != 0))
    clib_spinlock_lock (p);
}

static_always_inline void
clib_spinlock_unlock (clib_spinlock_t * p)
{
  CLIB_LOCK_DBG_CLEAR (p);
  /* Make sure all reads/writes are complete before releasing the lock */
  clib_atomic_release (&(*p)->lock);
}

static_always_inline void
clib_spinlock_unlock_if_init (clib_spinlock_t * p)
{
  if (PREDICT_FALSE (*p != 0))
    clib_spinlock_unlock (p);
}

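/*
 * Illustrative spinlock usage (variable name is only an example):
 *
 *   clib_spinlock_t lock = 0;
 *   clib_spinlock_init (&lock);
 *   clib_spinlock_lock (&lock);
 *   ... critical section ...
 *   clib_spinlock_unlock (&lock);
 *   clib_spinlock_free (&lock);
 */
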
/*
 * Readers-Writer Lock
 */

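/* Built from two spin locks: n_readers_lock protects the reader count and
 * writer_lock provides exclusion. The first reader to arrive acquires
 * writer_lock on behalf of all readers and the last reader to leave
 * releases it; a writer acquires writer_lock directly. */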
typedef struct clib_rw_lock_
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  volatile u32 n_readers;
  volatile u32 n_readers_lock;
  volatile u32 writer_lock;
#if CLIB_DEBUG > 0
  pid_t pid;
  uword thread_index;
  void *frame_address;
#endif
} *clib_rwlock_t;

always_inline void
clib_rwlock_init (clib_rwlock_t * p)
{
  *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
  clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
}

always_inline void
clib_rwlock_free (clib_rwlock_t * p)
{
  if (*p)
    {
      clib_mem_free ((void *) *p);
      *p = 0;
    }
}

always_inline void
clib_rwlock_reader_lock (clib_rwlock_t * p)
{
  while (clib_atomic_test_and_set (&(*p)->n_readers_lock))
    CLIB_PAUSE ();

  (*p)->n_readers += 1;
  if ((*p)->n_readers == 1)
    {
      while (clib_atomic_test_and_set (&(*p)->writer_lock))
	CLIB_PAUSE ();
    }
  clib_atomic_release (&(*p)->n_readers_lock);
  CLIB_LOCK_DBG (p);
}

always_inline void
clib_rwlock_reader_unlock (clib_rwlock_t * p)
{
  ASSERT ((*p)->n_readers > 0);
  CLIB_LOCK_DBG_CLEAR (p);

  while (clib_atomic_test_and_set (&(*p)->n_readers_lock))
    CLIB_PAUSE ();

  (*p)->n_readers -= 1;
  if ((*p)->n_readers == 0)
    {
      clib_atomic_release (&(*p)->writer_lock);
    }
  clib_atomic_release (&(*p)->n_readers_lock);
}

always_inline void
clib_rwlock_writer_lock (clib_rwlock_t * p)
{
  while (clib_atomic_test_and_set (&(*p)->writer_lock))
    CLIB_PAUSE ();
  CLIB_LOCK_DBG (p);
}

always_inline void
clib_rwlock_writer_unlock (clib_rwlock_t * p)
{
  CLIB_LOCK_DBG_CLEAR (p);
  clib_atomic_release (&(*p)->writer_lock);
}

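/*
 * Illustrative rwlock usage (variable name is only an example):
 *
 *   clib_rwlock_t rw = 0;
 *   clib_rwlock_init (&rw);
 *   clib_rwlock_reader_lock (&rw);
 *   ... read-side critical section ...
 *   clib_rwlock_reader_unlock (&rw);
 *   clib_rwlock_writer_lock (&rw);
 *   ... write-side critical section ...
 *   clib_rwlock_writer_unlock (&rw);
 *   clib_rwlock_free (&rw);
 */
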
#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */