lib/librte_eal/common/include/arch/x86/rte_spinlock.h (DPDK 18.02)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_SPINLOCK_X86_64_H_
#define _RTE_SPINLOCK_X86_64_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "generic/rte_spinlock.h"
#include "rte_rtm.h"
#include "rte_cpuflags.h"
#include "rte_branch_prediction.h"
#include "rte_common.h"
#include "rte_pause.h"

/* Maximum number of times rte_try_tm() re-attempts a transaction before
 * falling back to the lock, and the explicit abort code it uses when the
 * lock is observed busy inside a transaction.
 */
#define RTE_RTM_MAX_RETRIES (10)
#define RTE_XABORT_LOCK_BUSY (0xff)

#ifndef RTE_FORCE_INTRINSICS
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
{
	int lock_val = 1;
	asm volatile (
			"1:\n"
			"xchg %[locked], %[lv]\n"	/* attempt to take the lock */
			"test %[lv], %[lv]\n"		/* old value 0 means we got it */
			"jz 3f\n"
			"2:\n"
			"pause\n"			/* spin politely while busy */
			"cmpl $0, %[locked]\n"		/* re-check with a plain read */
			"jnz 2b\n"
			"jmp 1b\n"			/* lock looks free, retry xchg */
			"3:\n"
			: [locked] "=m" (sl->locked), [lv] "=q" (lock_val)
			: "[lv]" (lock_val)
			: "memory");
}

static inline void
rte_spinlock_unlock(rte_spinlock_t *sl)
{
	int unlock_val = 0;
	/* xchg with a memory operand is implicitly LOCK-prefixed, so the
	 * store releases the lock with full ordering
	 */
	asm volatile (
			"xchg %[locked], %[ulv]\n"
			: [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val)
			: "[ulv]" (unlock_val)
			: "memory");
}

static inline int
rte_spinlock_trylock(rte_spinlock_t *sl)
{
	int lockval = 1;

	asm volatile (
			"xchg %[locked], %[lockval]"
			: [locked] "=m" (sl->locked), [lockval] "=q" (lockval)
			: "[lockval]" (lockval)
			: "memory");

	/* the previous value was 0 iff we acquired the lock */
	return lockval == 0;
}
#endif
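
/*
 * Usage sketch (illustrative only; the names below are hypothetical):
 * the plain spinlock calls pair up around a critical section. The lock
 * must be initialized first, e.g. via rte_spinlock_init() or the
 * RTE_SPINLOCK_INITIALIZER macro from generic/rte_spinlock.h.
 *
 *	static rte_spinlock_t counter_sl = RTE_SPINLOCK_INITIALIZER;
 *	static uint64_t counter;
 *
 *	static void
 *	counter_inc(void)
 *	{
 *		rte_spinlock_lock(&counter_sl);
 *		counter++;			// critical section
 *		rte_spinlock_unlock(&counter_sl);
 *	}
 */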

/* nonzero when the CPU supports Intel TSX/RTM; probed once at startup */
extern uint8_t rte_rtm_supported;

static inline int rte_tm_supported(void)
{
	return rte_rtm_supported;
}
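
/*
 * Usage sketch (illustrative): rte_tm_supported() lets an application
 * probe once whether hardware lock elision is available.
 *
 *	if (rte_tm_supported())
 *		printf("RTM lock elision available\n");
 */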

/*
 * Try to run the caller's critical section as an RTM transaction: start
 * a transaction, abort it explicitly if the lock is already held, and
 * retry a bounded number of times before asking the caller to fall back
 * to really taking the lock. Returns 1 if execution continues inside a
 * transaction, 0 otherwise.
 */
static inline int
rte_try_tm(volatile int *lock)
{
	if (!rte_rtm_supported)
		return 0;

	int retries = RTE_RTM_MAX_RETRIES;

	while (likely(retries--)) {

		unsigned int status = rte_xbegin();

		if (likely(RTE_XBEGIN_STARTED == status)) {
			if (unlikely(*lock))
				rte_xabort(RTE_XABORT_LOCK_BUSY);
			else
				return 1;
		}
		/* transaction aborted: wait for the lock holder to leave */
		while (*lock)
			rte_pause();

		if ((status & RTE_XABORT_EXPLICIT) &&
			(RTE_XABORT_CODE(status) == RTE_XABORT_LOCK_BUSY))
			continue;

		if ((status & RTE_XABORT_RETRY) == 0) /* do not retry */
			break;
	}
	return 0;
}

static inline void
rte_spinlock_lock_tm(rte_spinlock_t *sl)
{
	if (likely(rte_try_tm(&sl->locked)))
		return;

	rte_spinlock_lock(sl); /* fall-back */
}

static inline int
rte_spinlock_trylock_tm(rte_spinlock_t *sl)
{
	if (likely(rte_try_tm(&sl->locked)))
		return 1;

	return rte_spinlock_trylock(sl);
}

static inline void
rte_spinlock_unlock_tm(rte_spinlock_t *sl)
{
	/* a set lock means the fall-back path was taken; otherwise we are
	 * still inside a transaction and must commit it
	 */
	if (unlikely(sl->locked))
		rte_spinlock_unlock(sl);
	else
		rte_xend();
}
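
/*
 * Usage sketch (illustrative; function and variable names are
 * hypothetical): the _tm variants transparently elide the lock with RTM
 * when supported and fall back to the plain spinlock otherwise, so they
 * are paired exactly like the non-TM calls.
 *
 *	static rte_spinlock_t state_sl = RTE_SPINLOCK_INITIALIZER;
 *
 *	static void
 *	state_update(int *value)
 *	{
 *		rte_spinlock_lock_tm(&state_sl);
 *		(*value)++;			// may run as an RTM transaction
 *		rte_spinlock_unlock_tm(&state_sl);
 *	}
 */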

static inline void
rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
{
	if (likely(rte_try_tm(&slr->sl.locked)))
		return;

	rte_spinlock_recursive_lock(slr); /* fall-back */
}

static inline void
rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
{
	if (unlikely(slr->sl.locked))
		rte_spinlock_recursive_unlock(slr);
	else
		rte_xend();
}

static inline int
rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
{
	if (likely(rte_try_tm(&slr->sl.locked)))
		return 1;

	return rte_spinlock_recursive_trylock(slr);
}
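
/*
 * Usage sketch (illustrative; function names are hypothetical): the
 * recursive variants let the same lcore re-acquire a lock it already
 * holds, e.g. when a locked helper is also reachable from an already
 * locked path.
 *
 *	static rte_spinlock_recursive_t rsl =
 *			RTE_SPINLOCK_RECURSIVE_INITIALIZER;
 *
 *	static void
 *	helper(void)
 *	{
 *		rte_spinlock_recursive_lock_tm(&rsl);	// re-lock is safe
 *		// ... shared-state work ...
 *		rte_spinlock_recursive_unlock_tm(&rsl);
 *	}
 *
 *	static void
 *	outer(void)
 *	{
 *		rte_spinlock_recursive_lock_tm(&rsl);
 *		helper();
 *		rte_spinlock_recursive_unlock_tm(&rsl);
 *	}
 */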

#ifdef __cplusplus
}
#endif

#endif /* _RTE_SPINLOCK_X86_64_H_ */