/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #ifndef included_clib_cpu_h
17 #define included_clib_cpu_h
19 #include <vppinfra/format.h>
/*
 * Multiarchitecture support. Adding a new entry will produce a
 * new graph node function variant optimized for the specific cpu
 * architecture.
 * Order is important for runtime selection, as the 1st match wins...
 */
/* Architecture variants for multiarch graph node functions.  Each entry
   is macro(arch, x, target-options) and produces one variant compiled
   with those options.  Variants are only generated for optimized
   (non-debug) x86_64 builds; otherwise the list is empty.  */
#if __x86_64__ && CLIB_DEBUG == 0
#define foreach_march_variant(macro, x) \
  macro(avx2, x, "arch=core-avx2")
#else
#define foreach_march_variant(macro, x)
#endif
/* Ask GCC to auto-vectorize functions tagged with CLIB_CPU_OPTIMIZED.
   clang does not support the "optimize" function attribute, so the
   macro expands to nothing there (and on old GCC).  */
#if __GNUC__ > 4 && !__clang__
#define CLIB_CPU_OPTIMIZED __attribute__ ((optimize ("tree-vectorize")))
#else
#define CLIB_CPU_OPTIMIZED
#endif
/* Runtime check used by CLIB_MULTIARCH_SELECT_FN: if the running CPU
   supports <arch>, return the <arch>-specific variant of <fn>.  */
#define CLIB_MULTIARCH_ARCH_CHECK(arch, fn, tgt) \
  if (clib_cpu_supports_ ## arch ()) \
    return & fn ## _ ## arch;

/* Defines fn_multiarch_select(): returns a pointer to the most specific
   variant of fn supported by this CPU (first match in
   foreach_march_variant wins), falling back to the generic fn.  */
#define CLIB_MULTIARCH_SELECT_FN(fn,...) \
  __VA_ARGS__ void * fn ## _multiarch_select(void) \
{ \
  foreach_march_variant(CLIB_MULTIARCH_ARCH_CHECK, fn) \
  return & fn; \
}
/* When compiling a per-arch variant translation unit,
   CLIB_MULTIARCH_VARIANT is defined and CLIB_MULTIARCH_FN(fn) expands
   to fn_<variant> (two-step expansion so the variant name itself is
   macro-expanded).  In the generic build it is just fn.  */
#ifdef CLIB_MULTIARCH_VARIANT
#define __CLIB_MULTIARCH_FN(a,b) a##_##b
#define _CLIB_MULTIARCH_FN(a,b) __CLIB_MULTIARCH_FN(a,b)
#define CLIB_MULTIARCH_FN(fn) _CLIB_MULTIARCH_FN(fn,CLIB_MULTIARCH_VARIANT)
#else
#define CLIB_MULTIARCH_FN(fn) fn
#endif
/* x86_64 feature flags: _ (name, cpuid leaf, result register, bit).
   NOTE: avx2 must stay in this list -- foreach_march_variant relies on
   clib_cpu_supports_avx2() existing.  aes is prefixed with the arch
   name to avoid clashing with the aarch64 flag of the same name.  */
#define foreach_x86_64_flags \
_ (sse3,     1, ecx, 0)   \
_ (ssse3,    1, ecx, 9)   \
_ (sse41,    1, ecx, 19)  \
_ (sse42,    1, ecx, 20)  \
_ (avx,      1, ecx, 28)  \
_ (avx2,     7, ebx, 5)   \
_ (avx512f,  7, ebx, 16)  \
_ (x86_aes,  1, ecx, 25)  \
_ (sha,      7, ebx, 29)  \
_ (invariant_tsc, 0x80000007, edx, 8)
75 #define foreach_aarch64_flags \
100 #if defined(__x86_64__)
104 clib_get_cpuid (const u32 lev, u32 * eax, u32 * ebx, u32 * ecx, u32 * edx)
106 if ((u32) __get_cpuid_max (0x80000000 & lev, 0) < lev)
109 __cpuid_count (lev, 0, *eax, *ebx, *ecx, *edx);
111 __cpuid (lev, *eax, *ebx, *ecx, *edx);
116 #define _(flag, func, reg, bit) \
118 clib_cpu_supports_ ## flag() \
120 u32 __attribute__((unused)) eax, ebx = 0, ecx = 0, edx = 0; \
121 clib_get_cpuid (func, &eax, &ebx, &ecx, &edx); \
123 return ((reg & (1 << bit)) != 0); \
127 #else /* __x86_64__ */
129 #define _(flag, func, reg, bit) \
130 static inline int clib_cpu_supports_ ## flag() { return 0; }
133 #endif /* __x86_64__ */
134 #if defined(__aarch64__)
135 #include <sys/auxv.h>
136 #define _(flag, bit) \
138 clib_cpu_supports_ ## flag() \
140 unsigned long hwcap = getauxval(AT_HWCAP); \
141 return (hwcap & (1 << bit)); \
143 foreach_aarch64_flags
145 #else /* ! __x86_64__ && !__aarch64__ */
146 #define _(flag, bit) \
147 static inline int clib_cpu_supports_ ## flag() { return 0; }
148 foreach_aarch64_flags
150 #endif /* __x86_64__, __aarch64__ */
/*
 * aes is the only feature with the same name in both flag lists.
 * Handle this by prefixing it with the arch name, and handling it
 * with the custom function below.
 */
/* Arch-neutral AES feature test: dispatch to the arch-prefixed helper
   generated above, or 0 on architectures with no AES detection.
   BUG FIX: the first conditional tested __aarch64__ (duplicating the
   second branch) while calling the x86 helper; it must test
   __x86_64__.  */
static inline int
clib_cpu_supports_aes ()
{
#if defined (__x86_64__)
  return clib_cpu_supports_x86_aes ();
#elif defined (__aarch64__)
  return clib_cpu_supports_aarch64_aes ();
#else
  return 0;
#endif
}
168 #endif /* included_clib_cpu_h */
170 format_function_t format_cpu_uarch;
171 format_function_t format_cpu_model_name;
172 format_function_t format_cpu_flags;
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */