/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
38 #include <vppinfra/longjmp.h>
39 #include <vppinfra/mheap.h>
40 #include <vppinfra/os.h>
43 clib_smp_free (clib_smp_main_t * m)
45 clib_mem_vm_free (m->vm_base,
46 (uword) ((1 + m->n_cpus) << m->log2_n_per_cpu_vm_bytes));
50 allocate_per_cpu_mheap (uword cpu)
52 clib_smp_main_t *m = &clib_smp_main;
54 uword vm_size, stack_size, mheap_flags;
56 ASSERT (os_get_thread_index () == cpu);
58 vm_size = (uword) 1 << m->log2_n_per_cpu_vm_bytes;
59 stack_size = (uword) 1 << m->log2_n_per_cpu_stack_bytes;
61 mheap_flags = MHEAP_FLAG_SMALL_OBJECT_CACHE;
63 /* Heap extends up to start of stack. */
64 heap = mheap_alloc_with_flags (clib_smp_vm_base_for_cpu (m, cpu),
65 vm_size - stack_size, mheap_flags);
66 clib_mem_set_heap (heap);
70 /* Now that we have a heap, allocate main structure on cpu 0. */
71 vec_resize (m->per_cpu_mains, m->n_cpus);
73 /* Allocate shared global heap (thread safe). */
75 mheap_alloc_with_flags (clib_smp_vm_base_for_cpu (m, cpu + m->n_cpus),
77 mheap_flags | MHEAP_FLAG_THREAD_SAFE);
80 m->per_cpu_mains[cpu].heap = heap;
87 clib_smp_main_t *m = &clib_smp_main;
91 clib_mem_vm_alloc ((uword) (m->n_cpus + 1) << m->log2_n_per_cpu_vm_bytes);
93 clib_error ("error allocating virtual memory");
95 for (cpu = 0; cpu < m->n_cpus; cpu++)
96 clib_calljmp (allocate_per_cpu_mheap, cpu,
97 clib_smp_stack_top_for_cpu (m, cpu));
101 clib_smp_lock_init (clib_smp_lock_t ** pl)
104 uword i, n_bytes, n_fifo_elts;
106 /* No locking necessary if n_cpus <= 1.
107 Null means no locking is necessary. */
108 if (clib_smp_main.n_cpus < 2)
114 /* Need n_cpus - 1 elts in waiting fifo. One CPU holds lock
115 and others could potentially be waiting. */
116 n_fifo_elts = clib_smp_main.n_cpus - 1;
118 n_bytes = sizeof (l[0]) + n_fifo_elts * sizeof (l->waiting_fifo[0]);
119 ASSERT_AND_PANIC (n_bytes % CLIB_CACHE_LINE_BYTES == 0);
121 l = clib_mem_alloc_aligned (n_bytes, CLIB_CACHE_LINE_BYTES);
123 memset (l, 0, n_bytes);
124 l->n_waiting_fifo_elts = n_fifo_elts;
126 for (i = 0; i < l->n_waiting_fifo_elts; i++)
127 l->waiting_fifo[i].wait_type = CLIB_SMP_LOCK_WAIT_EMPTY;
133 clib_smp_lock_free (clib_smp_lock_t ** pl)
141 clib_smp_lock_slow_path (clib_smp_lock_t * l,
143 clib_smp_lock_header_t h0, clib_smp_lock_type_t type)
145 clib_smp_lock_header_t h1, h2, h3;
146 uword is_reader = type == CLIB_SMP_LOCK_TYPE_READER;
147 uword n_fifo_elts = l->n_waiting_fifo_elts;
150 /* Atomically advance waiting FIFO tail pointer; my_tail will point
151 to entry where we can insert ourselves to wait for lock to be granted. */
155 my_tail = h1.waiting_fifo.head_index + h1.waiting_fifo.n_elts;
156 my_tail = my_tail >= n_fifo_elts ? my_tail - n_fifo_elts : my_tail;
157 h1.waiting_fifo.n_elts += 1;
158 h1.request_cpu = my_cpu;
160 ASSERT_AND_PANIC (h1.waiting_fifo.n_elts <= n_fifo_elts);
161 ASSERT_AND_PANIC (my_tail >= 0 && my_tail < n_fifo_elts);
163 h2 = clib_smp_lock_set_header (l, h1, h0);
165 /* Tail successfully advanced? */
166 if (clib_smp_lock_header_is_equal (h0, h2))
169 /* It is possible that if head and tail are both zero, CPU with lock would have unlocked lock. */
170 else if (type == CLIB_SMP_LOCK_TYPE_SPIN)
172 while (!h2.writer_has_lock)
174 ASSERT_AND_PANIC (h2.waiting_fifo.n_elts == 0);
176 h1.request_cpu = my_cpu;
177 h1.writer_has_lock = 1;
179 h3 = clib_smp_lock_set_header (l, h1, h2);
182 if (clib_smp_lock_header_is_equal (h2, h3))
189 /* Try to advance tail again. */
194 clib_smp_lock_waiting_fifo_elt_t *w;
196 w = l->waiting_fifo + my_tail;
198 while (w->wait_type != CLIB_SMP_LOCK_WAIT_EMPTY)
201 w->wait_type = (is_reader
202 ? CLIB_SMP_LOCK_WAIT_READER : CLIB_SMP_LOCK_WAIT_WRITER);
204 /* Wait until CPU holding the lock grants us the lock. */
205 while (w->wait_type != CLIB_SMP_LOCK_WAIT_DONE)
208 w->wait_type = CLIB_SMP_LOCK_WAIT_EMPTY;
213 clib_smp_unlock_slow_path (clib_smp_lock_t * l,
215 clib_smp_lock_header_t h0,
216 clib_smp_lock_type_t type)
218 clib_smp_lock_header_t h1, h2;
219 clib_smp_lock_waiting_fifo_elt_t *head;
220 clib_smp_lock_wait_type_t head_wait_type;
221 uword is_reader = type == CLIB_SMP_LOCK_TYPE_READER;
222 uword n_fifo_elts = l->n_waiting_fifo_elts;
223 uword head_index, must_wait_for_readers;
227 /* Advance waiting fifo giving lock to first waiter. */
230 ASSERT_AND_PANIC (h0.waiting_fifo.n_elts != 0);
234 head_index = h1.waiting_fifo.head_index;
235 head = l->waiting_fifo + head_index;
238 ASSERT_AND_PANIC (h1.n_readers_with_lock > 0);
239 h1.n_readers_with_lock -= 1;
243 /* Writer will already have lock. */
244 ASSERT_AND_PANIC (h1.writer_has_lock);
247 while ((head_wait_type =
248 head->wait_type) == CLIB_SMP_LOCK_WAIT_EMPTY)
251 /* Don't advance FIFO to writer unless all readers have unlocked. */
252 must_wait_for_readers =
253 (type != CLIB_SMP_LOCK_TYPE_SPIN
254 && head_wait_type == CLIB_SMP_LOCK_WAIT_WRITER
255 && h1.n_readers_with_lock != 0);
257 if (!must_wait_for_readers)
260 h1.waiting_fifo.n_elts -= 1;
261 if (type != CLIB_SMP_LOCK_TYPE_SPIN)
263 if (head_wait_type == CLIB_SMP_LOCK_WAIT_WRITER)
264 h1.writer_has_lock = h1.n_readers_with_lock == 0;
267 h1.writer_has_lock = 0;
268 h1.n_readers_with_lock += 1;
273 h1.waiting_fifo.head_index =
274 head_index == n_fifo_elts ? 0 : head_index;
275 h1.request_cpu = my_cpu;
277 ASSERT_AND_PANIC (h1.waiting_fifo.head_index >= 0
278 && h1.waiting_fifo.head_index < n_fifo_elts);
279 ASSERT_AND_PANIC (h1.waiting_fifo.n_elts >= 0
280 && h1.waiting_fifo.n_elts <= n_fifo_elts);
282 h2 = clib_smp_lock_set_header (l, h1, h0);
284 if (clib_smp_lock_header_is_equal (h2, h0))
289 if (h0.waiting_fifo.n_elts == 0)
290 return clib_smp_unlock_inline (l, type);
293 if (must_wait_for_readers)
296 /* Wake up head of waiting fifo. */
300 /* Shift lock to first thread waiting in fifo. */
301 head->wait_type = CLIB_SMP_LOCK_WAIT_DONE;
303 /* For read locks we may be able to wake multiple readers. */
305 if (head_wait_type == CLIB_SMP_LOCK_WAIT_READER)
307 uword hi = h0.waiting_fifo.head_index;
308 if (h0.waiting_fifo.n_elts != 0
309 && l->waiting_fifo[hi].wait_type == CLIB_SMP_LOCK_WAIT_READER)
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */