/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_SPINLOCK_X86_64_H_
#define _RTE_SPINLOCK_X86_64_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "generic/rte_spinlock.h"
#include "rte_rtm.h"
#include "rte_cpuflags.h"
#include "rte_branch_prediction.h"
#include "rte_common.h"
#include "rte_pause.h"

#define RTE_RTM_MAX_RETRIES (10)
#define RTE_XABORT_LOCK_BUSY (0xff)

#ifndef RTE_FORCE_INTRINSICS
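/*
 * Take the lock with an atomic xchg of 1 into sl->locked. If the
 * previous value was non-zero the lock is held elsewhere: spin with
 * pause, re-reading the lock word until it is observed free, then
 * retry the xchg.
 */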
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
{
        int lock_val = 1;
        asm volatile (
                        "1:\n"
                        "xchg %[locked], %[lv]\n"
                        "test %[lv], %[lv]\n"
                        "jz 3f\n"
                        "2:\n"
                        "pause\n"
                        "cmpl $0, %[locked]\n"
                        "jnz 2b\n"
                        "jmp 1b\n"
                        "3:\n"
                        : [locked] "=m" (sl->locked), [lv] "=q" (lock_val)
                        : "[lv]" (lock_val)
                        : "memory");
}

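/*
 * Release the lock by atomically exchanging 0 into sl->locked.
 */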
static inline void
rte_spinlock_unlock(rte_spinlock_t *sl)
{
        int unlock_val = 0;
        asm volatile (
                        "xchg %[locked], %[ulv]\n"
                        : [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val)
                        : "[ulv]" (unlock_val)
                        : "memory");
}

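/*
 * Single xchg attempt: returns 1 if the lock was free and is now held
 * by the caller, 0 if it was already taken.
 */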
static inline int
rte_spinlock_trylock(rte_spinlock_t *sl)
{
        int lockval = 1;

        asm volatile (
                        "xchg %[locked], %[lockval]"
                        : [locked] "=m" (sl->locked), [lockval] "=q" (lockval)
                        : "[lockval]" (lockval)
                        : "memory");

        return lockval == 0;
}
#endif /* RTE_FORCE_INTRINSICS */

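/*
 * Non-zero when the CPU supports Intel TSX RTM; set at startup from
 * the probed CPU flags.
 */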
extern uint8_t rte_rtm_supported;

static inline int rte_tm_supported(void)
{
        return rte_rtm_supported;
}

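/*
 * Try to elide the lock with an RTM transaction. After a successful
 * rte_xbegin() the lock word is read inside the transaction (adding
 * it to the read-set, so a later writer aborts us): if it is already
 * held, abort with RTE_XABORT_LOCK_BUSY; otherwise return 1 and run
 * the critical section transactionally. After an abort, wait for the
 * lock to be released and retry, up to RTE_RTM_MAX_RETRIES times.
 * Returns 0 when the caller must take the lock for real.
 */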
static inline int
rte_try_tm(volatile int *lock)
{
        int retries;

        if (!rte_rtm_supported)
                return 0;

        retries = RTE_RTM_MAX_RETRIES;

        while (likely(retries--)) {
                unsigned int status = rte_xbegin();

                if (likely(RTE_XBEGIN_STARTED == status)) {
                        if (unlikely(*lock))
                                rte_xabort(RTE_XABORT_LOCK_BUSY);
                        else
                                return 1;
                }
                while (*lock)
                        rte_pause();

                if ((status & RTE_XABORT_EXPLICIT) &&
                        (RTE_XABORT_CODE(status) == RTE_XABORT_LOCK_BUSY))
                        continue;

                if ((status & RTE_XABORT_RETRY) == 0) /* do not retry */
                        break;
        }
        return 0;
}

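/*
 * Lock with transactional elision: enter a transaction instead of
 * taking the lock when RTM is available, and fall back to the regular
 * spinlock when the transaction cannot start.
 */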
static inline void
rte_spinlock_lock_tm(rte_spinlock_t *sl)
{
        if (likely(rte_try_tm(&sl->locked)))
                return;

        rte_spinlock_lock(sl); /* fall-back */
}

static inline int
rte_spinlock_trylock_tm(rte_spinlock_t *sl)
{
        if (likely(rte_try_tm(&sl->locked)))
                return 1;

        return rte_spinlock_trylock(sl);
}

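/*
 * A non-zero sl->locked means the fall-back path really took the
 * lock, so release it; otherwise the critical section ran as a
 * transaction and must be committed with rte_xend().
 */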
static inline void
rte_spinlock_unlock_tm(rte_spinlock_t *sl)
{
        if (unlikely(sl->locked))
                rte_spinlock_unlock(sl);
        else
                rte_xend();
}

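/*
 * Recursive-spinlock variants: elision works on the inner lock word;
 * the fall-back paths keep the usual owner/count bookkeeping.
 */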
static inline void
rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
{
        if (likely(rte_try_tm(&slr->sl.locked)))
                return;

        rte_spinlock_recursive_lock(slr); /* fall-back */
}

static inline void
rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
{
        if (unlikely(slr->sl.locked))
                rte_spinlock_recursive_unlock(slr);
        else
                rte_xend();
}

static inline int
rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
{
        if (likely(rte_try_tm(&slr->sl.locked)))
                return 1;

        return rte_spinlock_recursive_trylock(slr);
}
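
/*
 * Illustrative usage (not part of this header's API surface): elide
 * a critical section when RTM is available and transparently fall
 * back to plain spinning otherwise.
 *
 *   static rte_spinlock_t lock = RTE_SPINLOCK_INITIALIZER;
 *
 *   rte_spinlock_lock_tm(&lock);
 *   ... critical section ...
 *   rte_spinlock_unlock_tm(&lock);
 */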

#ifdef __cplusplus
}
#endif

#endif /* _RTE_SPINLOCK_X86_64_H_ */