/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_clib_lock_h
#define included_clib_lock_h

#include <vppinfra/clib.h>
#include <vppinfra/atomics.h>

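/*
 * CLIB_PAUSE emits a spin-loop hint so a busy-waiting hardware thread
 * yields pipeline resources and saves power (pause on x86, yield on ARM);
 * on other architectures it expands to nothing.
 */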
#if __x86_64__
#define CLIB_PAUSE() __builtin_ia32_pause ()
#elif defined (__aarch64__) || defined (__arm__)
#define CLIB_PAUSE() __asm__ ("yield")
#else
#define CLIB_PAUSE()
#endif

#if CLIB_DEBUG > 1
#define CLIB_LOCK_DBG(_p)                               \
do {                                                    \
    (*_p)->frame_address = __builtin_frame_address (0); \
    (*_p)->pid = getpid ();                             \
    (*_p)->thread_index = os_get_thread_index ();       \
} while (0)
#define CLIB_LOCK_DBG_CLEAR(_p)                         \
do {                                                    \
    (*_p)->frame_address = 0;                           \
    (*_p)->pid = 0;                                     \
    (*_p)->thread_index = 0;                            \
} while (0)
#else
#define CLIB_LOCK_DBG(_p)
#define CLIB_LOCK_DBG_CLEAR(_p)
#endif

#define CLIB_SPINLOCK_IS_LOCKED(_p) (*(_p))->lock
#define CLIB_SPINLOCK_ASSERT_LOCKED(_p) ASSERT(CLIB_SPINLOCK_IS_LOCKED((_p)))
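
/*
 * Note: CLIB_SPINLOCK_IS_LOCKED does a plain (non-atomic) read of the
 * lock word, so the result is only a point-in-time hint; it is intended
 * for assertions, not for lock-free fast-path decisions.
 */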

struct clib_spinlock_s
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  u32 lock;
#if CLIB_DEBUG > 0
  pid_t pid;
  uword thread_index;
  void *frame_address;
#endif
};

typedef struct clib_spinlock_s *clib_spinlock_t;

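/*
 * Note the double indirection in the API below: a clib_spinlock_t is
 * itself a pointer to a cache-line-aligned, heap-allocated struct, so
 * callers embed a clib_spinlock_t in their own data structures and pass
 * its address (clib_spinlock_t *) to init/lock/unlock/free.
 */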
static inline void
clib_spinlock_init (clib_spinlock_t * p)
{
  *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
  clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
}

static inline void
clib_spinlock_free (clib_spinlock_t * p)
{
  if (*p)
    {
      clib_mem_free ((void *) *p);
      *p = 0;
    }
}

static_always_inline void
clib_spinlock_lock (clib_spinlock_t * p)
{
  u32 free = 0;
  while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->lock, &free, 1, 0))
    {
      /* atomic load limits number of compare_exchange executions */
      while (clib_atomic_load_relax_n (&(*p)->lock))
        CLIB_PAUSE ();
      /* on failure, compare_exchange writes (*p)->lock into free */
      free = 0;
    }
  CLIB_LOCK_DBG (p);
}

static_always_inline int
clib_spinlock_trylock (clib_spinlock_t * p)
{
  u32 free = 0;
  /* single compare-and-swap attempt: either the lock is taken here or
   * we return immediately, so trylock never blocks waiting for a
   * concurrent owner */
  if (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->lock, &free, 1, 0))
    return 0;
  CLIB_LOCK_DBG (p);
  return 1;
}

static_always_inline void
clib_spinlock_lock_if_init (clib_spinlock_t * p)
{
  if (PREDICT_FALSE (*p != 0))
    clib_spinlock_lock (p);
}

static_always_inline int
clib_spinlock_trylock_if_init (clib_spinlock_t * p)
{
  if (PREDICT_FALSE (*p != 0))
    return clib_spinlock_trylock (p);
  return 1;
}

static_always_inline void
clib_spinlock_unlock (clib_spinlock_t * p)
{
  CLIB_LOCK_DBG_CLEAR (p);
  /* Make sure all reads/writes are complete before releasing the lock */
  clib_atomic_release (&(*p)->lock);
}

static_always_inline void
clib_spinlock_unlock_if_init (clib_spinlock_t * p)
{
  if (PREDICT_FALSE (*p != 0))
    clib_spinlock_unlock (p);
}

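/*
 * Usage sketch (illustrative only; my_counter_t and my_counter_incr are
 * hypothetical, not part of this API):
 *
 *   typedef struct
 *   {
 *     clib_spinlock_t lock;
 *     u64 value;
 *   } my_counter_t;
 *
 *   static void
 *   my_counter_incr (my_counter_t * c)
 *   {
 *     clib_spinlock_lock (&c->lock);
 *     c->value++;
 *     clib_spinlock_unlock (&c->lock);
 *   }
 *
 * clib_spinlock_init (&c->lock) must run once before first use; the
 * *_if_init variants skip locking entirely while the lock pointer is
 * still null (i.e. init never ran).
 */
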
/*
 * Readers-Writer Lock
 */

typedef struct clib_rw_lock_
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  /* -1 when W lock held, > 0 when R lock held, 0 when free */
  volatile i32 rw_cnt;
#if CLIB_DEBUG > 0
  pid_t pid;
  uword thread_index;
  void *frame_address;
#endif
} *clib_rwlock_t;

always_inline void
clib_rwlock_init (clib_rwlock_t * p)
{
  *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
  clib_memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
}

always_inline void
clib_rwlock_free (clib_rwlock_t * p)
{
  if (*p)
    {
      clib_mem_free ((void *) *p);
      *p = 0;
    }
}

always_inline void
clib_rwlock_reader_lock (clib_rwlock_t * p)
{
  i32 cnt;
  do
    {
      /* rwlock held by a writer */
      while ((cnt = clib_atomic_load_relax_n (&(*p)->rw_cnt)) < 0)
        CLIB_PAUSE ();
    }
  /* weak CAS (last argument 1): a spurious failure just retries the loop */
  while (!clib_atomic_cmp_and_swap_acq_relax_n
         (&(*p)->rw_cnt, &cnt, cnt + 1, 1));
  CLIB_LOCK_DBG (p);
}

always_inline void
clib_rwlock_reader_unlock (clib_rwlock_t * p)
{
  ASSERT ((*p)->rw_cnt > 0);
  CLIB_LOCK_DBG_CLEAR (p);
  clib_atomic_fetch_sub_rel (&(*p)->rw_cnt, 1);
}

always_inline void
clib_rwlock_writer_lock (clib_rwlock_t * p)
{
  i32 cnt = 0;
  do
    {
      /* rwlock held by writer or reader(s) */
      while ((cnt = clib_atomic_load_relax_n (&(*p)->rw_cnt)) != 0)
        CLIB_PAUSE ();
    }
  while (!clib_atomic_cmp_and_swap_acq_relax_n (&(*p)->rw_cnt, &cnt, -1, 1));
  CLIB_LOCK_DBG (p);
}

always_inline void
clib_rwlock_writer_unlock (clib_rwlock_t * p)
{
  CLIB_LOCK_DBG_CLEAR (p);
  /* store 0 with release semantics so the lock becomes free */
  clib_atomic_release (&(*p)->rw_cnt);
}

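/*
 * Usage sketch (illustrative only; my_table_t and its helpers are
 * hypothetical): any number of readers may hold the lock at once,
 * while a writer waits for exclusive access.
 *
 *   u64
 *   my_table_sum (my_table_t * t)
 *   {
 *     u64 sum = 0;
 *     u32 i;
 *     clib_rwlock_reader_lock (&t->rwlock);
 *     for (i = 0; i < t->n_entries; i++)
 *       sum += t->entries[i];
 *     clib_rwlock_reader_unlock (&t->rwlock);
 *     return sum;
 *   }
 *
 *   void
 *   my_table_add (my_table_t * t, u64 v)
 *   {
 *     clib_rwlock_writer_lock (&t->rwlock);
 *     t->entries[t->n_entries++] = v;
 *     clib_rwlock_writer_unlock (&t->rwlock);
 *   }
 *
 * Note this lock is not writer-preferring: a continuous stream of
 * readers can starve a waiting writer.
 */
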
#endif /* included_clib_lock_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */