/* virtio: fix txq locking — vpp.git: src/vppinfra/lock.h */
1 /*
2  * Copyright (c) 2017 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #ifndef included_clib_lock_h
17 #define included_clib_lock_h
18
19 #include <vppinfra/clib.h>
20 #include <vppinfra/atomics.h>
21
/* CLIB_PAUSE: spin-wait hint for busy-wait loops.  Maps to the PAUSE
 * instruction on x86 and YIELD on ARM (both advise the CPU that the core
 * is spinning, easing power use and memory-order machine pressure); a
 * no-op on other architectures. */
#if __x86_64__
#define CLIB_PAUSE() __builtin_ia32_pause ()
#elif defined (__aarch64__) || defined (__arm__)
#define CLIB_PAUSE() __asm__ ("yield")
#else
#define CLIB_PAUSE()
#endif
29
/* Lock-holder bookkeeping, enabled only when CLIB_DEBUG > 1.
 * CLIB_LOCK_DBG records who currently holds the lock (frame address,
 * pid, thread index) into the lock structure on acquisition;
 * CLIB_LOCK_DBG_CLEAR zeroes those fields on release.  Both expand to
 * nothing in non-debug builds.  _p is a pointer-to-pointer to the lock
 * structure (clib_spinlock_t * / clib_rwlock_t *). */
#if CLIB_DEBUG > 1
#define CLIB_LOCK_DBG(_p)                               \
do {                                                    \
    (*_p)->frame_address = __builtin_frame_address (0); \
    (*_p)->pid = getpid ();                             \
    (*_p)->thread_index = os_get_thread_index ();       \
} while (0)
#define CLIB_LOCK_DBG_CLEAR(_p)                         \
do {                                                    \
    (*_p)->frame_address = 0;                           \
    (*_p)->pid = 0;                                     \
    (*_p)->thread_index = 0;                            \
} while (0)
#else
#define CLIB_LOCK_DBG(_p)
#define CLIB_LOCK_DBG_CLEAR(_p)
#endif
47
/* Plain (non-atomic) peek at the lock word — suitable for assertions and
 * heuristics, not for synchronization decisions. */
#define CLIB_SPINLOCK_IS_LOCKED(_p) (*(_p))->lock
#define CLIB_SPINLOCK_ASSERT_LOCKED(_p) ASSERT(CLIB_SPINLOCK_IS_LOCKED((_p)))
50
/* Spinlock object, allocated on its own cache line to avoid false
 * sharing.  lock is 0 when free, 1 when held (see clib_spinlock_lock).
 * The debug fields identify the current holder when CLIB_DEBUG > 0. */
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  u32 lock;			/* 0 = free, 1 = held */
#if CLIB_DEBUG > 0
  pid_t pid;
  uword thread_index;
  void *frame_address;
#endif
} *clib_spinlock_t;
61
/* Allocate and zero one cache line for the spinlock; the lock starts in
 * the unlocked state (lock == 0).
 * NOTE(review): allocation size is CLIB_CACHE_LINE_BYTES, which assumes
 * the struct fits in a single cache line — appears true for the fields
 * visible here. */
static inline void
clib_spinlock_init (clib_spinlock_t * p)
{
  *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
  clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
}
68
69 static inline void
70 clib_spinlock_free (clib_spinlock_t * p)
71 {
72   if (*p)
73     {
74       clib_mem_free ((void *) *p);
75       *p = 0;
76     }
77 }
78
/* Acquire the spinlock, spinning until it is obtained.
 *
 * Test-and-test-and-set: attempt an acquire-ordered CAS (0 -> 1); on
 * failure, spin on a relaxed plain load until the word reads 0 again,
 * then retry the CAS.  Spinning on the load (rather than hammering the
 * CAS) keeps the cache line in shared state while waiting. */
static_always_inline void
clib_spinlock_lock (clib_spinlock_t * p)
{
  u32 free = 0;
  while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->lock, &free, 1, 0))
    {
      /* atomic load limits number of compare_exchange executions */
      while (clib_atomic_load_relax_n (&(*p)->lock))
	CLIB_PAUSE ();
      /* on failure, compare_exchange writes (*p)->lock into free */
      free = 0;
    }
  CLIB_LOCK_DBG (p);
}
93
94 static_always_inline int
95 clib_spinlock_trylock (clib_spinlock_t * p)
96 {
97   if (PREDICT_FALSE (CLIB_SPINLOCK_IS_LOCKED (p)))
98     return 0;
99   clib_spinlock_lock (p);
100   return 1;
101 }
102
103 static_always_inline void
104 clib_spinlock_lock_if_init (clib_spinlock_t * p)
105 {
106   if (PREDICT_FALSE (*p != 0))
107     clib_spinlock_lock (p);
108 }
109
110 static_always_inline int
111 clib_spinlock_trylock_if_init (clib_spinlock_t * p)
112 {
113   if (PREDICT_FALSE (*p != 0))
114     return clib_spinlock_trylock (p);
115   return 1;
116 }
117
/* Release the spinlock.  The debug holder info is cleared while the lock
 * is still held; the release store then publishes all writes made inside
 * the critical section to the next acquirer. */
static_always_inline void
clib_spinlock_unlock (clib_spinlock_t * p)
{
  CLIB_LOCK_DBG_CLEAR (p);
  /* Make sure all reads/writes are complete before releasing the lock */
  clib_atomic_release (&(*p)->lock);
}
125
126 static_always_inline void
127 clib_spinlock_unlock_if_init (clib_spinlock_t * p)
128 {
129   if (PREDICT_FALSE (*p != 0))
130     clib_spinlock_unlock (p);
131 }
132
133 /*
134  * Readers-Writer Lock
135  */
136
/* Readers-writer lock.  The entire state lives in rw_cnt:
 * 0 = free, > 0 = number of active readers, -1 = writer holds it.
 * NOTE(review): writers wait for rw_cnt to reach 0 while readers keep
 * incrementing it, so writer starvation appears possible under a steady
 * reader stream. */
typedef struct clib_rw_lock_
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  /* -1 when W lock held, > 0 when R lock held */
  volatile i32 rw_cnt;
#if CLIB_DEBUG > 0
  pid_t pid;
  uword thread_index;
  void *frame_address;
#endif
} *clib_rwlock_t;
148
/* Allocate and zero one cache line for the rwlock; rw_cnt == 0 means the
 * lock starts free. */
always_inline void
clib_rwlock_init (clib_rwlock_t * p)
{
  *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
  clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
}
155
156 always_inline void
157 clib_rwlock_free (clib_rwlock_t * p)
158 {
159   if (*p)
160     {
161       clib_mem_free ((void *) *p);
162       *p = 0;
163     }
164 }
165
/* Take the lock for reading.  Spin while a writer holds it (rw_cnt < 0),
 * then attempt an acquire-ordered CAS of rw_cnt -> rw_cnt + 1.  A failed
 * CAS means the count changed under us, so re-enter the wait loop with
 * the freshly observed value. */
always_inline void
clib_rwlock_reader_lock (clib_rwlock_t * p)
{
  i32 cnt;
  do
    {
      /* rwlock held by a writer */
      while ((cnt = clib_atomic_load_relax_n (&(*p)->rw_cnt)) < 0)
	CLIB_PAUSE ();
    }
  while (!clib_atomic_cmp_and_swap_acq_relax_n
	 (&(*p)->rw_cnt, &cnt, cnt + 1, 1));
  CLIB_LOCK_DBG (p);
}
180
/* Drop one reader reference: decrement rw_cnt with release ordering so
 * the reader's accesses complete before the count drops.  Asserts the
 * lock is actually held by readers (rw_cnt > 0). */
always_inline void
clib_rwlock_reader_unlock (clib_rwlock_t * p)
{
  ASSERT ((*p)->rw_cnt > 0);
  CLIB_LOCK_DBG_CLEAR (p);
  clib_atomic_fetch_sub_rel (&(*p)->rw_cnt, 1);
}
188
/* Take the lock for writing.  Spin until neither readers nor a writer
 * hold it (rw_cnt == 0), then attempt an acquire-ordered CAS of
 * 0 -> -1; on CAS failure re-enter the wait loop. */
always_inline void
clib_rwlock_writer_lock (clib_rwlock_t * p)
{
  i32 cnt = 0;
  do
    {
      /* rwlock held by writer or reader(s) */
      while ((cnt = clib_atomic_load_relax_n (&(*p)->rw_cnt)) != 0)
	CLIB_PAUSE ();
    }
  while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->rw_cnt, &cnt, -1, 1));
  CLIB_LOCK_DBG (p);
}
202
/* Release the writer lock: clear debug holder info, then a release store
 * returns rw_cnt to 0, publishing the writer's updates to subsequent
 * acquirers. */
always_inline void
clib_rwlock_writer_unlock (clib_rwlock_t * p)
{
  CLIB_LOCK_DBG_CLEAR (p);
  clib_atomic_release (&(*p)->rw_cnt);
}
209
210 #endif
211
212 /*
213  * fd.io coding-style-patch-verification: ON
214  *
215  * Local Variables:
216  * eval: (c-set-style "gnu")
217  * End:
218  */