/*
 *   BSD LICENSE
 *
 *   Copyright (C) IBM Corporation 2014.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IBM Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Inspired from FreeBSD src/sys/powerpc/include/atomic.h
 * Copyright (c) 2008 Marcel Moolenaar
 * Copyright (c) 2001 Benno Rice
 * Copyright (c) 2001 David E. O'Brien
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 */

#ifndef _RTE_ATOMIC_PPC_64_H_
#define _RTE_ATOMIC_PPC_64_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>
#include "generic/rte_atomic.h"

/**
 * General memory barrier.
 *
 * Guarantees that the LOAD and STORE operations generated before the
 * barrier occur before the LOAD and STORE operations generated after.
 */
#define rte_mb()  asm volatile("sync" : : : "memory")

/**
 * Write memory barrier.
 *
 * Guarantees that the STORE operations generated before the barrier
 * occur before the STORE operations generated after.
 */
#define rte_wmb() asm volatile("sync" : : : "memory")

/**
 * Read memory barrier.
 *
 * Guarantees that the LOAD operations generated before the barrier
 * occur before the LOAD operations generated after.
 */
#define rte_rmb() asm volatile("sync" : : : "memory")

#define rte_smp_mb() rte_mb()

#define rte_smp_wmb() rte_wmb()

#define rte_smp_rmb() rte_rmb()

#define rte_io_mb() rte_mb()

#define rte_io_wmb() rte_wmb()

#define rte_io_rmb() rte_rmb()

#define rte_cio_wmb() rte_wmb()

#define rte_cio_rmb() rte_rmb()

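/*
 * Illustrative sketch, not part of the exported API: how the write and
 * read barriers above are meant to pair in a producer/consumer exchange.
 * The example_* names are hypothetical.
 */
static inline void
example_publish(uint32_t *payload, volatile uint32_t *ready, uint32_t value)
{
	*payload = value;
	rte_wmb();	/* order the payload store before the flag store */
	*ready = 1;
}

static inline uint32_t
example_consume(const uint32_t *payload, volatile const uint32_t *ready)
{
	while (*ready == 0)
		;	/* spin until the producer sets the flag */
	rte_rmb();	/* order the flag load before the payload load */
	return *payload;
}
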
/*------------------------- 16 bit atomic operations -------------------------*/

/* To be compatible with Power7, use GCC built-in functions for 16 bit
 * operations.
 */

#ifndef RTE_FORCE_INTRINSICS
static inline int
rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
{
	return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,
		__ATOMIC_ACQUIRE) ? 1 : 0;
}

static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
{
	return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
}

static inline void
rte_atomic16_inc(rte_atomic16_t *v)
{
	__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
}

static inline void
rte_atomic16_dec(rte_atomic16_t *v)
{
	__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
}

static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
{
	return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}

static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
{
	return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}

static inline uint16_t
rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
{
	return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
}

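/*
 * Illustrative sketch, not part of the exported API: the usual
 * compare-and-set retry loop built on rte_atomic16_cmpset(), here
 * incrementing a counter only while it is below a caller-supplied
 * maximum. The function name is hypothetical.
 */
static inline int
example_atomic16_inc_upto(volatile uint16_t *cnt, uint16_t max)
{
	uint16_t old;

	do {
		old = *cnt;
		if (old >= max)
			return 0;	/* saturated: leave the counter alone */
	} while (rte_atomic16_cmpset(cnt, old, old + 1) == 0);

	return 1;	/* counter moved from old to old + 1 */
}
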
/*------------------------- 32 bit atomic operations -------------------------*/

static inline int
rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
{
	unsigned int ret = 0;

	asm volatile(
			"\tlwsync\n"
			"1:\tlwarx %[ret], 0, %[dst]\n"
			"cmplw %[exp], %[ret]\n"
			"bne 2f\n"
			"stwcx. %[src], 0, %[dst]\n"
			"bne- 1b\n"
			"li %[ret], 1\n"
			"b 3f\n"
			"2:\n"
			"stwcx. %[ret], 0, %[dst]\n"
			"li %[ret], 0\n"
			"3:\n"
			"isync\n"
			: [ret] "=&r" (ret), "=m" (*dst)
			: [dst] "r" (dst),
			  [exp] "r" (exp),
			  [src] "r" (src),
			  "m" (*dst)
			: "cc", "memory");

	return ret;
}

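/*
 * How the sequence above works: lwarx loads the word and establishes a
 * reservation on its address; stwcx. succeeds only while the reservation
 * is still held, clearing CR0.EQ on failure so that "bne- 1b" retries.
 * When the comparison fails, the stwcx. of the value just read is issued
 * only to cancel the reservation. The leading lwsync and the trailing
 * isync provide the release/acquire-style fencing around the update.
 */
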
static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
{
	return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
}

static inline void
rte_atomic32_inc(rte_atomic32_t *v)
{
	int t;

	asm volatile(
			"1: lwarx %[t],0,%[cnt]\n"
			"addic %[t],%[t],1\n"
			"stwcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "=m" (v->cnt)
			: [cnt] "r" (&v->cnt), "m" (v->cnt)
			: "cc", "xer", "memory");
}

static inline void
rte_atomic32_dec(rte_atomic32_t *v)
{
	int t;

	asm volatile(
			"1: lwarx %[t],0,%[cnt]\n"
			"addic %[t],%[t],-1\n"
			"stwcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "=m" (v->cnt)
			: [cnt] "r" (&v->cnt), "m" (v->cnt)
			: "cc", "xer", "memory");
}

static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
{
	int ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: lwarx %[ret],0,%[cnt]\n"
			"addic %[ret],%[ret],1\n"
			"stwcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [cnt] "r" (&v->cnt)
			: "cc", "xer", "memory");

	return ret == 0;
}

static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
{
	int ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: lwarx %[ret],0,%[cnt]\n"
			"addic %[ret],%[ret],-1\n"
			"stwcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [cnt] "r" (&v->cnt)
			: "cc", "xer", "memory");

	return ret == 0;
}

static inline uint32_t
rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
{
	return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
}

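/*
 * Illustrative sketch, not part of the exported API: the 32-bit helpers
 * used as a reference count, invoking a caller-supplied release callback
 * when the last reference is dropped. The example_* names are
 * hypothetical.
 */
static inline void
example_refcnt_put(rte_atomic32_t *refcnt, void (*release)(void *), void *obj)
{
	/* rte_atomic32_dec_and_test() returns nonzero once cnt reaches 0 */
	if (rte_atomic32_dec_and_test(refcnt))
		release(obj);
}
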
/*------------------------- 64 bit atomic operations -------------------------*/

static inline int
rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
{
	unsigned int ret = 0;

	asm volatile (
			"\tlwsync\n"
			"1: ldarx %[ret], 0, %[dst]\n"
			"cmpld %[exp], %[ret]\n"
			"bne 2f\n"
			"stdcx. %[src], 0, %[dst]\n"
			"bne- 1b\n"
			"li %[ret], 1\n"
			"b 3f\n"
			"2:\n"
			"stdcx. %[ret], 0, %[dst]\n"
			"li %[ret], 0\n"
			"3:\n"
			"isync\n"
			: [ret] "=&r" (ret), "=m" (*dst)
			: [dst] "r" (dst),
			  [exp] "r" (exp),
			  [src] "r" (src),
			  "m" (*dst)
			: "cc", "memory");

	return ret;
}

static inline void
rte_atomic64_init(rte_atomic64_t *v)
{
	v->cnt = 0;
}

static inline int64_t
rte_atomic64_read(rte_atomic64_t *v)
{
	long ret;

	asm volatile("ld%U1%X1 %[ret],%[cnt]"
		: [ret] "=r" (ret)
		: [cnt] "m" (v->cnt));

	return ret;
}

static inline void
rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
{
	asm volatile("std%U0%X0 %[new_value],%[cnt]"
		: [cnt] "=m" (v->cnt)
		: [new_value] "r" (new_value));
}

static inline void
rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
{
	long t;

	asm volatile(
			"1: ldarx %[t],0,%[cnt]\n"
			"add %[t],%[inc],%[t]\n"
			"stdcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "=m" (v->cnt)
			: [cnt] "r" (&v->cnt), [inc] "r" (inc), "m" (v->cnt)
			: "cc", "memory");
}

static inline void
rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
{
	long t;

	asm volatile(
			"1: ldarx %[t],0,%[cnt]\n"
			"subf %[t],%[dec],%[t]\n"
			"stdcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "+m" (v->cnt)
			: [cnt] "r" (&v->cnt), [dec] "r" (dec), "m" (v->cnt)
			: "cc", "memory");
}

static inline void
rte_atomic64_inc(rte_atomic64_t *v)
{
	long t;

	asm volatile(
			"1: ldarx %[t],0,%[cnt]\n"
			"addic %[t],%[t],1\n"
			"stdcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "+m" (v->cnt)
			: [cnt] "r" (&v->cnt), "m" (v->cnt)
			: "cc", "xer", "memory");
}

static inline void
rte_atomic64_dec(rte_atomic64_t *v)
{
	long t;

	asm volatile(
			"1: ldarx %[t],0,%[cnt]\n"
			"addic %[t],%[t],-1\n"
			"stdcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "+m" (v->cnt)
			: [cnt] "r" (&v->cnt), "m" (v->cnt)
			: "cc", "xer", "memory");
}

static inline int64_t
rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
{
	long ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: ldarx %[ret],0,%[cnt]\n"
			"add %[ret],%[inc],%[ret]\n"
			"stdcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [inc] "r" (inc), [cnt] "r" (&v->cnt)
			: "cc", "memory");

	return ret;
}

static inline int64_t
rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
{
	long ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: ldarx %[ret],0,%[cnt]\n"
			"subf %[ret],%[dec],%[ret]\n"
			"stdcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [dec] "r" (dec), [cnt] "r" (&v->cnt)
			: "cc", "memory");

	return ret;
}

static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
{
	long ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: ldarx %[ret],0,%[cnt]\n"
			"addic %[ret],%[ret],1\n"
			"stdcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [cnt] "r" (&v->cnt)
			: "cc", "xer", "memory");

	return ret == 0;
}

static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
{
	long ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: ldarx %[ret],0,%[cnt]\n"
			"addic %[ret],%[ret],-1\n"
			"stdcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [cnt] "r" (&v->cnt)
			: "cc", "xer", "memory");

	return ret == 0;
}

static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
{
	return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
}

/**
 * Atomically set a 64-bit counter to 0.
 *
 * @param v
 *   A pointer to the atomic counter.
 */
static inline void rte_atomic64_clear(rte_atomic64_t *v)
{
	v->cnt = 0;
}

static inline uint64_t
rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
{
	/* 8-byte exchange: __atomic_exchange_4 would truncate to 32 bits */
	return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
}

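/*
 * Illustrative sketch, not part of the exported API: swapping in a new
 * 64-bit epoch value and acting on the previous one. The example_* name
 * is hypothetical.
 */
static inline uint64_t
example_bump_epoch(volatile uint64_t *epoch, uint64_t next)
{
	/* returns the previous value; the swap is sequentially consistent */
	return rte_atomic64_exchange(epoch, next);
}
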
#endif

#ifdef __cplusplus
}
#endif

#endif /* _RTE_ATOMIC_PPC_64_H_ */