/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #ifndef included_clib_cpu_h
17 #define included_clib_cpu_h
19 #include <sys/syscall.h>
20 #include <vppinfra/format.h>
/*
 * Multiarchitecture support. Adding a new entry will produce a
 * new graph node function variant optimized for the specific cpu.
 * Order is important for runtime selection, as 1st match wins...
 */

#if __x86_64__ && CLIB_DEBUG == 0
#define foreach_march_variant(macro, x) \
  macro(avx2, x, "arch=core-avx2")
#else
/* Debug builds / non-x86: no extra march variants are generated. */
#define foreach_march_variant(macro, x)
#endif

#if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
/* GCC 5+ release builds: force -O3 on per-march function variants. */
#define CLIB_CPU_OPTIMIZED __attribute__ ((optimize ("O3")))
#else
#define CLIB_CPU_OPTIMIZED
#endif
/*
 * Runtime dispatch helper: inside a selector function, returns the
 * arch-suffixed variant of fn when the CPU supports the given feature.
 */
#define CLIB_MULTIARCH_ARCH_CHECK(arch, fn, tgt) \
  if (clib_cpu_supports_ ## arch()) \
    return & fn ## _ ##arch;

/* FIXME to be removed */
#define CLIB_MULTIARCH_SELECT_FN(fn,...)
#ifdef CLIB_MARCH_VARIANT
/* March-variant TU: suffix the function name with the variant name.
   The two-level expansion forces CLIB_MARCH_VARIANT to be expanded
   before token pasting. */
#define __CLIB_MULTIARCH_FN(a,b) a##_##b
#define _CLIB_MULTIARCH_FN(a,b) __CLIB_MULTIARCH_FN(a,b)
#define CLIB_MULTIARCH_FN(fn) _CLIB_MULTIARCH_FN(fn,CLIB_MARCH_VARIANT)
#else
/* Base TU: the function keeps its plain name. */
#define CLIB_MULTIARCH_FN(fn) fn
#endif

#define CLIB_MARCH_SFX CLIB_MULTIARCH_FN
/*
 * One node in a per-function linked list of march-variant registrations.
 * NOTE(review): this view is missing interior lines (presumably the
 * function pointer / priority / name fields referenced by the macros
 * below); do not edit the struct from this fragment alone.
 */
61 typedef struct _clib_march_fn_registration
65 struct _clib_march_fn_registration *next;
67 } clib_march_fn_registration;

/*
 * Apparently scans a registration list and keeps the function pointer of
 * the highest-priority entry -- TODO confirm against the full source;
 * the loop, locals and return statement are truncated in this view.
 */
69 static_always_inline void *
70 clib_march_select_fn_ptr (clib_march_fn_registration * r)
77 if (last_prio < r->priority)
79 last_prio = r->priority;
/* Resolve fn to the function pointer of the best (highest-priority)
   registered march variant. */
87 #define CLIB_MARCH_FN_POINTER(fn) \
88 clib_march_select_fn_ptr (fn##_march_fn_registrations);

/*
 * Emits a static registration record for this TU's variant of fn plus a
 * constructor that links it onto fn's registration list at program start.
 * NOTE(review): several continuation lines (braces, initializer fields)
 * are missing from this view; do not edit the macro body here.
 */
90 #define _CLIB_MARCH_FN_REGISTRATION(fn) \
91 static clib_march_fn_registration \
92 CLIB_MARCH_SFX(fn##_march_fn_registration) = \
94 .name = CLIB_MARCH_VARIANT_STR \
97 static void __clib_constructor \
98 fn##_march_register () \
100 clib_march_fn_registration *r; \
101 r = & CLIB_MARCH_SFX (fn##_march_fn_registration); \
102 r->priority = CLIB_MARCH_FN_PRIORITY(); \
103 r->next = fn##_march_fn_registrations; \
104 r->function = CLIB_MARCH_SFX (fn); \
105 fn##_march_fn_registrations = r; \
108 #ifdef CLIB_MARCH_VARIANT
/* Variant TU: the shared list head is defined by the base TU; reference
   it with extern before registering. */
109 #define CLIB_MARCH_FN_REGISTRATION(fn) \
110 extern clib_march_fn_registration *fn##_march_fn_registrations; \
111 _CLIB_MARCH_FN_REGISTRATION(fn)
/* Base TU: define (and zero) the list head, then register.  NOTE(review):
   the #else matching the #ifdef above appears to be missing in this view
   -- as shown, the second definition would be a redefinition error. */
113 #define CLIB_MARCH_FN_REGISTRATION(fn) \
114 clib_march_fn_registration *fn##_march_fn_registrations = 0; \
115 _CLIB_MARCH_FN_REGISTRATION(fn)
/*
 * x86_64 CPU feature flags: _(flag_name, cpuid_leaf, result_register, bit)
 * Expanded below to generate the clib_cpu_supports_<flag>() accessors.
 * Note: the AES flag is named x86_aes to avoid a clash with the aarch64
 * flag list; clib_cpu_supports_aes() handles the common name.
 */
#define foreach_x86_64_flags \
_ (sse3, 1, ecx, 0) \
_ (pclmulqdq, 1, ecx, 1) \
_ (ssse3, 1, ecx, 9) \
_ (sse41, 1, ecx, 19) \
_ (sse42, 1, ecx, 20) \
_ (avx, 1, ecx, 28) \
_ (rdrand, 1, ecx, 30) \
_ (avx2, 7, ebx, 5) \
_ (rtm, 7, ebx, 11) \
_ (pqm, 7, ebx, 12) \
_ (pqe, 7, ebx, 15) \
_ (avx512f, 7, ebx, 16) \
_ (rdseed, 7, ebx, 18) \
_ (x86_aes, 1, ecx, 25) \
_ (sha, 7, ebx, 29) \
_ (vaes, 7, ecx, 9) \
_ (vpclmulqdq, 7, ecx, 10) \
_ (invariant_tsc, 0x80000007, edx, 8)
/*
 * AArch64 HWCAP flag list: _(flag_name, hwcap_bit).  NOTE(review): the
 * flag entries themselves are missing from this view of the file.
 */
138 #define foreach_aarch64_flags \
164 clib_get_current_cpu_id ()
167 syscall (__NR_getcpu, &cpu, &node, 0);
/* NOTE(review): like the cpu-id getter above, this presumably returns
   the numa node from getcpu(2); declaration specifiers, locals, braces
   and return statements are truncated in this view. */
172 clib_get_current_numa_node ()
175 syscall (__NR_getcpu, &cpu, &node, 0);
179 #if defined(__x86_64__)
/*
 * Fetch CPUID leaf lev into *eax..*edx.  Bails out when the requested
 * leaf exceeds the CPU's maximum (basic or extended, per the 0x80000000
 * mask).  NOTE(review): declaration specifiers, braces and return
 * statements are missing from this view; leaf selection between
 * __cpuid_count (subleaf 0) and __cpuid is presumably keyed on lev == 7
 * -- confirm against the full source.
 */
183 clib_get_cpuid (const u32 lev, u32 * eax, u32 * ebx, u32 * ecx, u32 * edx)
185 if ((u32) __get_cpuid_max (0x80000000 & lev, 0) < lev)
188 __cpuid_count (lev, 0, *eax, *ebx, *ecx, *edx);
190 __cpuid (lev, *eax, *ebx, *ecx, *edx);
/*
 * Generate clib_cpu_supports_<flag>(): query cpuid leaf `func` and test
 * bit `bit` of register `reg`.
 */
195 #define _(flag, func, reg, bit) \
197 clib_cpu_supports_ ## flag() \
199 u32 __attribute__((unused)) eax, ebx = 0, ecx = 0, edx = 0; \
200 clib_get_cpuid (func, &eax, &ebx, &ecx, &edx); \
202 return ((reg & (1 << bit)) != 0); \
206 #else /* __x86_64__ */
/* Non-x86 build: every x86 feature test compiles to a constant 0. */
208 #define _(flag, func, reg, bit) \
209 static inline int clib_cpu_supports_ ## flag() { return 0; }
212 #endif /* __x86_64__ */
213 #if defined(__aarch64__)
214 #include <sys/auxv.h>
/*
 * Generate clib_cpu_supports_<flag>() for each aarch64 flag by testing
 * the given bit of the AT_HWCAP auxv word.
 * NOTE(review): (1 << bit) is a 32-bit int shift while hwcap is
 * unsigned long -- a bit >= 31 would be undefined behavior; confirm the
 * flag list only uses low bits, or widen to (1UL << bit) in a follow-up.
 */
215 #define _(flag, bit) \
217 clib_cpu_supports_ ## flag() \
219 unsigned long hwcap = getauxval(AT_HWCAP); \
220 return (hwcap & (1 << bit)); \
222 foreach_aarch64_flags
/* Neither x86_64 nor aarch64: all aarch64 feature tests return 0. */
224 #else /* ! __x86_64__ && !__aarch64__ */
225 #define _(flag, bit) \
226 static inline int clib_cpu_supports_ ## flag() { return 0; }
227 foreach_aarch64_flags
229 #endif /* __x86_64__, __aarch64__ */
/*
231 * aes is the only feature with the same name in both flag lists
232 * handle this by prefixing it with the arch name, and handling it
233 * with the custom function below
 */
/*
 * NOTE(review): the declaration specifiers (presumably static inline
 * int), braces, the fallback return for other architectures and the
 * closing #endif of this function are missing from this view.
 */
236 clib_cpu_supports_aes ()
238 #if defined(__x86_64__)
239 return clib_cpu_supports_x86_aes ();
240 #elif defined (__aarch64__)
241 return clib_cpu_supports_aarch64_aes ();
/*
 * March-priority helpers: a higher return value wins when selecting a
 * variant at startup.  NOTE(review): declaration specifiers, the actual
 * returned priority values, fallthrough returns and closing braces of
 * both functions are missing from this view.
 */
248 clib_cpu_march_priority_avx512 ()
250 if (clib_cpu_supports_avx512f ())
256 clib_cpu_march_priority_avx2 ()
258 if (clib_cpu_supports_avx2 ())
/*
 * Parse the "CPU implementer" field out of /proc/cpuinfo, caching the
 * result in a function-local static so the file is read at most once.
 * NOTE(review): braces, the line buffer declaration, fclose and the
 * return statements are missing from this view.
 * NOTE(review): memchr() returns NULL when no ':' occurs within 128
 * bytes; the unconditional +2 would then be undefined behavior -- worth
 * a guard in a follow-up change.
 */
264 clib_cpu_implementer ()
267 static u32 implementer = -1;
269 if (-1 != implementer)
272 FILE *fp = fopen ("/proc/cpuinfo", "r");
278 if (!fgets (buf, sizeof (buf), fp))
281 if (strstr (buf, "CPU implementer"))
282 implementer = (u32) strtol (memchr (buf, ':', 128) + 2, NULL, 0);
283 if (-1 != implementer)
/*
 * Same caching pattern for the "CPU part" field; the enclosing function
 * header is not visible in this view, and the memchr() NULL caveat noted
 * above applies here as well.
 */
295 static u32 part = -1;
300 FILE *fp = fopen ("/proc/cpuinfo", "r");
306 if (!fgets (buf, sizeof (buf), fp))
309 if (strstr (buf, "CPU part"))
310 part = (u32) strtol (memchr (buf, ':', 128) + 2, NULL, 0);
/*
 * MIDR-style identification values (implementer / part, as reported in
 * /proc/cpuinfo) used by the per-core march-priority helpers below.
 * NOTE: the "THUNERDERX2" spelling (sic) is kept -- the misspelled
 * identifiers are referenced by existing consumers; renaming would break
 * them.
 */
#define AARCH64_CPU_IMPLEMENTER_THUNERDERX2 0x43
#define AARCH64_CPU_PART_THUNERDERX2 0x0af
#define AARCH64_CPU_IMPLEMENTER_QDF24XX 0x51
#define AARCH64_CPU_PART_QDF24XX 0xc00
#define AARCH64_CPU_IMPLEMENTER_CORTEXA72 0x41
#define AARCH64_CPU_PART_CORTEXA72 0xd08
/*
 * Per-core march priorities for aarch64: match on the (implementer,
 * part) pair parsed from /proc/cpuinfo.  NOTE(review): declaration
 * specifiers, the returned priority values and closing braces of all
 * three functions are missing from this view.  The "THUNERDERX2"
 * spelling matches the constant definitions above.
 */
327 clib_cpu_march_priority_thunderx2t99 ()
329 if ((AARCH64_CPU_IMPLEMENTER_THUNERDERX2 == clib_cpu_implementer ()) &&
330 (AARCH64_CPU_PART_THUNERDERX2 == clib_cpu_part ()))
336 clib_cpu_march_priority_qdf24xx ()
338 if ((AARCH64_CPU_IMPLEMENTER_QDF24XX == clib_cpu_implementer ()) &&
339 (AARCH64_CPU_PART_QDF24XX == clib_cpu_part ()))
345 clib_cpu_march_priority_cortexa72 ()
347 if ((AARCH64_CPU_IMPLEMENTER_CORTEXA72 == clib_cpu_implementer ()) &&
348 (AARCH64_CPU_PART_CORTEXA72 == clib_cpu_part ()))
/* In a march-variant TU the function priority comes from the variant's
   clib_cpu_march_priority_<variant>() helper; in the base TU it is 0.
   NOTE(review): the #else between the two definitions is missing from
   this view. */
353 #ifdef CLIB_MARCH_VARIANT
354 #define CLIB_MARCH_FN_PRIORITY() CLIB_MARCH_SFX(clib_cpu_march_priority)()
356 #define CLIB_MARCH_FN_PRIORITY() 0
/* NOTE(review): the include-guard #endif appears here, before the
   CLIB_MARCH_FN machinery below -- presumably intentional so the
   following macros are processed on every inclusion; confirm upstream. */
358 #endif /* included_clib_cpu_h */
/*
 * Emits a constructor that promotes this TU's variant of fn to
 * fn_selected if its priority beats the currently selected one.
 * NOTE(review): the braces of the constructor body are missing from this
 * view; do not edit the macro from this fragment.
 */
360 #define CLIB_MARCH_FN_CONSTRUCTOR(fn) \
361 static void __clib_constructor \
362 CLIB_MARCH_SFX(fn ## _march_constructor) (void) \
364 if (CLIB_MARCH_FN_PRIORITY() > fn ## _selected_priority) \
366 fn ## _selected = & CLIB_MARCH_SFX (fn ## _ma); \
367 fn ## _selected_priority = CLIB_MARCH_FN_PRIORITY(); \
371 #ifndef CLIB_MARCH_VARIANT
/*
 * Base TU: define the dispatch pointer and its priority, defaulting to
 * the plain (fn_ma) implementation, then open the function definition
 * (the user's body follows the macro invocation).
 */
372 #define CLIB_MARCH_FN(fn, rtype, _args...) \
373 static rtype CLIB_CPU_OPTIMIZED CLIB_MARCH_SFX (fn ## _ma)(_args); \
374 rtype (*fn ## _selected) (_args) = & CLIB_MARCH_SFX (fn ## _ma); \
375 int fn ## _selected_priority = 0; \
376 static inline rtype CLIB_CPU_OPTIMIZED \
377 CLIB_MARCH_SFX (fn ## _ma)(_args)
/*
 * Variant TU: reference the base TU's dispatch state with extern and
 * register this variant via the constructor.  NOTE(review): the #else
 * matching the #ifndef above is missing from this view.
 */
379 #define CLIB_MARCH_FN(fn, rtype, _args...) \
380 static rtype CLIB_CPU_OPTIMIZED CLIB_MARCH_SFX (fn ## _ma)(_args); \
381 extern rtype (*fn ## _selected) (_args); \
382 extern int fn ## _selected_priority; \
383 CLIB_MARCH_FN_CONSTRUCTOR (fn) \
384 static rtype CLIB_CPU_OPTIMIZED CLIB_MARCH_SFX (fn ## _ma)(_args)

/* Call through the runtime-selected implementation of fn. */
387 #define CLIB_MARCH_FN_SELECT(fn) (* fn ## _selected)
/* format() callbacks (declared here, defined elsewhere) for printing the
   CPU microarchitecture, model name and feature flags -- presumably for
   use with the format() machinery from format.h included above. */
389 format_function_t format_cpu_uarch;
390 format_function_t format_cpu_model_name;
391 format_function_t format_cpu_flags;
/*
 * fd.io coding-style-patch-verification: ON
 * eval: (c-set-style "gnu")
 */