/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#ifndef _RTE_SPINLOCK_X86_64_H_
#define _RTE_SPINLOCK_X86_64_H_
#include "generic/rte_spinlock.h"
#include "rte_rtm.h"
#include "rte_cpuflags.h"
#include "rte_branch_prediction.h"
#include "rte_common.h"
#include "rte_pause.h"
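/*
 * RTM (Intel TSX) lock-elision tunables: a transaction is attempted at
 * most RTE_RTM_MAX_RETRIES times, and RTE_XABORT_LOCK_BUSY is the
 * explicit abort code used when the lock is observed held inside a
 * transaction.
 */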
#define RTE_RTM_MAX_RETRIES (10)
#define RTE_XABORT_LOCK_BUSY (0xff)
#ifndef RTE_FORCE_INTRINSICS
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
{
	int lock_val = 1;
	/* Try to take the lock with xchg; on failure, spin with pause on a
	 * plain read until the lock looks free, then retry the xchg.
	 */
	asm volatile (
			"1:\n"
			"xchg %[locked], %[lv]\n"
			"test %[lv], %[lv]\n"
			"jz 3f\n"
			"2:\n"
			"pause\n"
			"cmpl $0, %[locked]\n"
			"jnz 2b\n"
			"jmp 1b\n"
			"3:\n"
			: [locked] "=m" (sl->locked), [lv] "=q" (lock_val)
			: "[lv]" (lock_val)
			: "memory");
}
static inline void
rte_spinlock_unlock(rte_spinlock_t *sl)
{
	int unlock_val = 0;
	asm volatile (
			"xchg %[locked], %[ulv]\n"
			: [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val)
			: "[ulv]" (unlock_val)
			: "memory");
}
static inline int
rte_spinlock_trylock(rte_spinlock_t *sl)
{
	int lockval = 1;

	asm volatile (
			"xchg %[locked], %[lockval]"
			: [locked] "=m" (sl->locked), [lockval] "=q" (lockval)
			: "[lockval]" (lockval)
			: "memory");

	/* the old value was 0 iff the lock was free and is now ours */
	return lockval == 0;
}
#endif /* !RTE_FORCE_INTRINSICS */
/* Set non-zero at init time if the CPU supports Intel TSX RTM. */
extern uint8_t rte_rtm_supported;
static inline int rte_tm_supported(void)
{
	return rte_rtm_supported;
}
static inline int
rte_try_tm(volatile int *lock)
{
	int retries;

	if (!rte_rtm_supported)
		return 0;

	retries = RTE_RTM_MAX_RETRIES;

	while (likely(retries--)) {
		unsigned int status = rte_xbegin();

		if (likely(RTE_XBEGIN_STARTED == status)) {
			/* Abort if the lock is already held; otherwise the
			 * transaction is live and we run lock-free.
			 */
			if (unlikely(*lock))
				rte_xabort(RTE_XABORT_LOCK_BUSY);
			else
				return 1;
		}
		/* The transaction aborted: wait for the holder to release
		 * the lock before deciding whether to retry.
		 */
		while (*lock)
			rte_pause();

		if ((status & RTE_XABORT_EXPLICIT) &&
			(RTE_XABORT_CODE(status) == RTE_XABORT_LOCK_BUSY))
			continue;

		if ((status & RTE_XABORT_RETRY) == 0) /* do not retry */
			break;
	}
	return 0;
}
static inline void
rte_spinlock_lock_tm(rte_spinlock_t *sl)
{
	if (likely(rte_try_tm(&sl->locked)))
		return;

	rte_spinlock_lock(sl); /* fall-back */
}
static inline int
rte_spinlock_trylock_tm(rte_spinlock_t *sl)
{
	if (likely(rte_try_tm(&sl->locked)))
		return 1;

	return rte_spinlock_trylock(sl);
}
static inline void
rte_spinlock_unlock_tm(rte_spinlock_t *sl)
{
	/* If the lock is actually held we took the fall-back path;
	 * otherwise we are inside a transaction and must commit it.
	 */
	if (unlikely(sl->locked))
		rte_spinlock_unlock(sl);
	else
		rte_xend();
}
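/*
 * Usage sketch for the elided variants (illustrative only): the _tm
 * calls pair exactly like the plain ones, and transparently fall back
 * to the regular spinlock when RTM is unsupported or the transaction
 * keeps aborting.
 *
 *	rte_spinlock_lock_tm(&lock);
 *	... short, conflict-free critical section ...
 *	rte_spinlock_unlock_tm(&lock);
 */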
static inline void
rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
{
	if (likely(rte_try_tm(&slr->sl.locked)))
		return;

	rte_spinlock_recursive_lock(slr); /* fall-back */
}
static inline void
rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
{
	if (unlikely(slr->sl.locked))
		rte_spinlock_recursive_unlock(slr);
	else
		rte_xend();
}
static inline int
rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
{
	if (likely(rte_try_tm(&slr->sl.locked)))
		return 1;

	return rte_spinlock_recursive_trylock(slr);
}
#endif /* _RTE_SPINLOCK_X86_64_H_ */