2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #ifndef included_clib_cpu_h
17 #define included_clib_cpu_h
19 #include <sys/syscall.h>
20 #include <vppinfra/format.h>
/*
 * Per-architecture list of march (micro-architecture) build variants.
 * X-macro style: each entry is _ (tag, "human readable name") and is
 * expanded by downstream users of this header (enum values, priority
 * function names, ...).
 * NOTE(review): this view of the file appears to be missing the #else /
 * #endif lines that normally close this conditional -- confirm against
 * the complete source before relying on the structure shown here.
 */
22 #if defined(__x86_64__)
23 #define foreach_march_variant \
24 _ (scalar, "Generic (SIMD disabled)") \
25 _ (hsw, "Intel Haswell") \
26 _ (trm, "Intel Tremont") \
27 _ (skx, "Intel Skylake (server) / Cascade Lake") \
28 _ (icl, "Intel Ice Lake") \
29 _ (adl, "Intel Alder Lake") \
30 _ (spr, "Intel Sapphire Rapids") \
31 _ (znver3, "AMD Milan") \
32 _ (znver4, "AMD Genoa")
33 #elif defined(__aarch64__)
34 #define foreach_march_variant \
35 _ (octeontx2, "Marvell Octeon TX2") \
36 _ (thunderx2t99, "Marvell ThunderX2 T99") \
37 _ (qdf24xx, "Qualcomm CentriqTM 2400") \
38 _ (cortexa72, "ARM Cortex-A72") \
39 _ (neoversen1, "ARM Neoverse N1") \
40 _ (neoversen2, "ARM Neoverse N2")
/* Fallback for other architectures: no march variants at all. */
42 #define foreach_march_variant
/*
 * amd_vendor (t1, t2, t3) - true when the three CPUID leaf-0 vendor-id
 * registers spell the AMD vendor string "AuthenticAMD".  CPUID returns
 * the 12-byte vendor string as three little-endian u32s:
 * EBX = "Auth", EDX = "enti", ECX = "cAMD"; here t1 = EBX, t2 = ECX,
 * t3 = EDX.  Arguments are parenthesized so that expression arguments
 * (e.g. amd_vendor (a ^ b, ...)) expand with the intended precedence.
 */
#define amd_vendor(t1, t2, t3)                                                \
  (((t1) == 0x68747541) && /* "Auth" */                                       \
   ((t2) == 0x444d4163) && /* "cAMD" */                                       \
   ((t3) == 0x69746e65))   /* "enti" */
/*
 * Enum of march variant types: one CLIB_MARCH_VARIANT_TYPE_<tag> per
 * entry of foreach_march_variant, plus a count terminator.
 * NOTE(review): the opening "typedef enum {" and the foreach expansion /
 * #undef lines appear to be missing from this view of the file -- the
 * lines below are only a fragment of the full definition.
 */
51 CLIB_MARCH_VARIANT_TYPE = 0,
/* Generates CLIB_MARCH_VARIANT_TYPE_<tag> for each variant. */
52 #define _(s, n) CLIB_MARCH_VARIANT_TYPE_##s,
55 CLIB_MARCH_TYPE_N_VARIANTS
56 } clib_march_variant_type_t;
/*
 * CLIB_MULTIARCH_FN(fn) appends the current march variant tag to a
 * function name (e.g. foo -> foo_skx) when this translation unit is
 * compiled as a march variant; the default build leaves the name as-is.
 * The two-level macro indirection forces CLIB_MARCH_VARIANT to expand
 * before token pasting.
 * NOTE(review): the #else / #endif between the two CLIB_MULTIARCH_FN
 * definitions appear to be missing from this view of the file.
 */
58 #ifdef CLIB_MARCH_VARIANT
59 #define __CLIB_MULTIARCH_FN(a,b) a##_##b
60 #define _CLIB_MULTIARCH_FN(a,b) __CLIB_MULTIARCH_FN(a,b)
61 #define CLIB_MULTIARCH_FN(fn) _CLIB_MULTIARCH_FN(fn,CLIB_MARCH_VARIANT)
63 #define CLIB_MULTIARCH_FN(fn) fn
/* Shorter alias used throughout the registration machinery below. */
66 #define CLIB_MARCH_SFX CLIB_MULTIARCH_FN
/*
 * Per-variant registration record for multi-arch function selection.
 * Each march variant of a function links one of these onto the
 * function's global singly linked list; the highest-priority entry is
 * chosen at startup (see clib_march_select_fn_ptr below).
 * NOTE(review): the function-pointer / priority / name members appear to
 * be missing from this view of the file -- only the list link is shown.
 */
68 typedef struct _clib_march_fn_registration
72 struct _clib_march_fn_registration *next;
74 } clib_march_fn_registration;
/*
 * Walk a march-variant registration list and return the function pointer
 * of the highest-priority registration; the visible lines track the best
 * priority seen so far.
 * NOTE(review): the loop construct, body braces and return statement are
 * missing from this view of the file -- do not modify without the full
 * source.
 */
76 static_always_inline void *
77 clib_march_select_fn_ptr (clib_march_fn_registration * r)
84 if (last_prio < r->priority)
86 last_prio = r->priority;
/*
 * Resolve a multi-arch function to a pointer to the best
 * (highest-priority) registered march variant, cast to fn's type.
 * NOTE(review): both macros embed a trailing ';' in their expansion, so
 * they cannot be used inside expressions -- callers may depend on this,
 * confirm before changing.
 */
94 #define CLIB_MARCH_FN_POINTER(fn) \
95 (__typeof__ (fn) *) clib_march_select_fn_ptr (fn##_march_fn_registrations);
/* Untyped (void *) flavor of CLIB_MARCH_FN_POINTER. */
97 #define CLIB_MARCH_FN_VOID_POINTER(fn) \
98 clib_march_select_fn_ptr (fn##_march_fn_registrations);
/*
 * Define one static registration record for this translation unit's
 * march variant of fn, plus a load-time constructor that fills in the
 * priority and function pointer and pushes the record onto fn's global
 * registration list.
 * NOTE(review): several macro lines (initializer braces, the closing
 * brace of the constructor, ...) appear to be missing from this view of
 * the file; comments cannot be placed inside the '\'-continued body.
 */
100 #define _CLIB_MARCH_FN_REGISTRATION(fn) \
101 static clib_march_fn_registration \
102 CLIB_MARCH_SFX(fn##_march_fn_registration) = \
104 .name = CLIB_MARCH_VARIANT_STR \
107 static void __clib_constructor \
108 fn##_march_register () \
110 clib_march_fn_registration *r; \
111 r = & CLIB_MARCH_SFX (fn##_march_fn_registration); \
112 r->priority = CLIB_MARCH_FN_PRIORITY(); \
113 r->next = fn##_march_fn_registrations; \
114 r->function = CLIB_MARCH_SFX (fn); \
115 fn##_march_fn_registrations = r; \
/*
 * Public registration macro.  The default (non-variant) build defines
 * and zero-initializes the per-function list head; march-variant builds
 * only declare it extern, so all variants of fn share a single list.
 * NOTE(review): the #else / #endif of this conditional appear to be
 * missing from this view of the file.
 */
118 #ifdef CLIB_MARCH_VARIANT
119 #define CLIB_MARCH_FN_REGISTRATION(fn) \
120 extern clib_march_fn_registration *fn##_march_fn_registrations; \
121 _CLIB_MARCH_FN_REGISTRATION(fn)
123 #define CLIB_MARCH_FN_REGISTRATION(fn) \
124 clib_march_fn_registration *fn##_march_fn_registrations = 0; \
125 _CLIB_MARCH_FN_REGISTRATION(fn)
/*
 * x86-64 CPU feature flags: _ (flag_name, cpuid_leaf, register, bit).
 * Each entry generates a clib_cpu_supports_<flag>() predicate (see the
 * generator macro below) that queries the given CPUID leaf and tests the
 * given bit of the named output register.  Leaves 0x80000001/0x80000007
 * are extended leaves.  "aes" is spelled "x86_aes" to avoid colliding
 * with the aarch64 flag of the same name (see clib_cpu_supports_aes).
 */
127 #define foreach_x86_64_flags \
128 _ (sse3, 1, ecx, 0) \
129 _ (pclmulqdq, 1, ecx, 1) \
130 _ (ssse3, 1, ecx, 9) \
131 _ (sse41, 1, ecx, 19) \
132 _ (sse42, 1, ecx, 20) \
133 _ (avx, 1, ecx, 28) \
134 _ (rdrand, 1, ecx, 30) \
135 _ (avx2, 7, ebx, 5) \
136 _ (bmi2, 7, ebx, 8) \
137 _ (rtm, 7, ebx, 11) \
138 _ (pqm, 7, ebx, 12) \
139 _ (pqe, 7, ebx, 15) \
140 _ (avx512f, 7, ebx, 16) \
141 _ (rdseed, 7, ebx, 18) \
142 _ (x86_aes, 1, ecx, 25) \
143 _ (sha, 7, ebx, 29) \
144 _ (vaes, 7, ecx, 9) \
145 _ (vpclmulqdq, 7, ecx, 10) \
146 _ (avx512_vnni, 7, ecx, 11) \
147 _ (avx512_bitalg, 7, ecx, 12) \
148 _ (avx512_vpopcntdq, 7, ecx, 14) \
149 _ (movdiri, 7, ecx, 27) \
150 _ (movdir64b, 7, ecx, 28) \
151 _ (enqcmd, 7, ecx, 29) \
152 _ (avx512_fp16, 7, edx, 23) \
153 _ (invariant_tsc, 0x80000007, edx, 8) \
154 _ (monitorx, 0x80000001, ecx, 29)
/*
 * aarch64 CPU feature flags: _ (flag_name, AT_HWCAP bit).
 * NOTE(review): the list entries (original lines 157-180) are missing
 * from this view of the file, leaving a dangling line continuation that
 * splices the following declaration into the macro -- consult the full
 * source before editing.
 */
156 #define foreach_aarch64_flags \
181 u32 clib_get_current_cpu_id (void);
/* NUMA node of the CPU the calling thread currently runs on. */
182 u32 clib_get_current_numa_node (void);
/* Signature shared by the generated clib_cpu_supports_* predicates. */
184 typedef int (*clib_cpu_supports_func_t) (void);
186 #if defined(__x86_64__)
/*
 * Fetch CPUID leaf `lev` into *eax..*edx.  Availability is validated via
 * __get_cpuid_max (masking with 0x80000000 selects the extended range
 * for extended leaves); __cpuid_count is used with subleaf 0 on one
 * path and plain __cpuid on the other.
 * NOTE(review): the return-type line, braces, return statements and the
 * <cpuid.h> include (original lines 187-200) are missing from this view
 * of the file -- do not modify without the full source.
 */
190 clib_get_cpuid (const u32 lev, u32 * eax, u32 * ebx, u32 * ecx, u32 * edx)
192 if ((u32) __get_cpuid_max (0x80000000 & lev, 0) < lev)
195 __cpuid_count (lev, 0, *eax, *ebx, *ecx, *edx);
197 __cpuid (lev, *eax, *ebx, *ecx, *edx);
/*
 * Generator: one clib_cpu_supports_<flag>() per x86 flag entry, testing
 * bit <bit> of register <reg> from CPUID leaf <func>.
 */
201 #define _(flag, func, reg, bit) \
203 clib_cpu_supports_ ## flag() \
205 u32 __attribute__((unused)) eax, ebx = 0, ecx = 0, edx = 0; \
206 clib_get_cpuid (func, &eax, &ebx, &ecx, &edx); \
208 return ((reg & (1 << bit)) != 0); \
212 #else /* __x86_64__ */
/* Non-x86 builds: every x86 feature predicate is a constant 0. */
214 #define _(flag, func, reg, bit) \
215 static inline int clib_cpu_supports_ ## flag() { return 0; }
218 #endif /* __x86_64__ */
219 #if defined(__aarch64__)
220 #include <sys/auxv.h>
/*
 * Generator: one clib_cpu_supports_<flag>() per aarch64 flag entry,
 * testing the corresponding AT_HWCAP bit reported by getauxval().
 * NOTE(review): "(1 << bit)" is an int-width shift; if any entry of
 * foreach_aarch64_flags uses bit >= 31 this is undefined behavior and
 * should be "(1UL << bit)" -- confirm against the (not visible) flag
 * list.  Macro braces appear to be missing from this view of the file.
 */
221 #define _(flag, bit) \
223 clib_cpu_supports_ ## flag() \
225 unsigned long hwcap = getauxval(AT_HWCAP); \
226 return (hwcap & (1 << bit)); \
228 foreach_aarch64_flags
230 #else /* ! __x86_64__ && !__aarch64__ */
/* Other architectures: every aarch64 feature predicate is a constant 0. */
231 #define _(flag, bit) \
232 static inline int clib_cpu_supports_ ## flag() { return 0; }
233 foreach_aarch64_flags
235 #endif /* __x86_64__, __aarch64__ */
/*
 * NOTE(review): the comment lines below appear to have lost their
 * opening and closing comment markers in this view of the file.
 */
237 * aes is the only feature with the same name in both flag lists
238 * handle this by prefixing it with the arch name, and handling it
239 * with the custom function below
/*
 * Arch-independent AES feature test: dispatches to the arch-prefixed
 * predicate generated above for the current architecture.
 * NOTE(review): the return-type line, braces and the closing #else /
 * #endif (original lines 241, 243, 248+) are missing from this view.
 */
242 clib_cpu_supports_aes ()
244 #if defined(__x86_64__)
245 return clib_cpu_supports_x86_aes ();
246 #elif defined (__aarch64__)
247 return clib_cpu_supports_aarch64_aes ();
/*
 * x86-64 march-variant priority functions.  Each returns a rank used to
 * pick the best variant for the running CPU; a variant qualifies when
 * its distinguishing CPUID feature bits are present.
 * NOTE(review): return types, braces and the actual priority values
 * returned are missing from this view of the file -- only the signatures
 * and feature checks remain visible.
 */
254 clib_cpu_march_priority_scalar ()
/* Sapphire Rapids: qualifies on ENQCMD support. */
260 clib_cpu_march_priority_spr ()
262 if (clib_cpu_supports_enqcmd ())
/* Ice Lake: qualifies on AVX512 BITALG support. */
268 clib_cpu_march_priority_icl ()
270 if (clib_cpu_supports_avx512_bitalg ())
/* Alder Lake: qualifies on MOVDIRI plus AVX2. */
276 clib_cpu_march_priority_adl ()
278 if (clib_cpu_supports_movdiri () && clib_cpu_supports_avx2 ())
/* Skylake-SP / Cascade Lake: qualifies on AVX512F. */
284 clib_cpu_march_priority_skx ()
286 if (clib_cpu_supports_avx512f ())
/* Tremont: qualifies on MOVDIRI. */
292 clib_cpu_march_priority_trm ()
294 if (clib_cpu_supports_movdiri ())
/* Haswell: qualifies on AVX2. */
300 clib_cpu_march_priority_hsw ()
302 if (clib_cpu_supports_avx2 ())
/* Zen 4 (Genoa): qualifies on AVX512 BITALG plus MONITORX. */
308 clib_cpu_march_priority_znver4 ()
310 if (clib_cpu_supports_avx512_bitalg () && clib_cpu_supports_monitorx ())
/* Zen 3 (Milan): qualifies on AVX2 plus MONITORX. */
316 clib_cpu_march_priority_znver3 ()
318 if (clib_cpu_supports_avx2 () && clib_cpu_supports_monitorx ())
/* CPUID leaf 0xA: architectural performance monitoring. */
323 #define X86_CPU_ARCH_PERF_FUNC 0xA
/*
 * Report the number of general-purpose and fixed PMU counters from
 * CPUID leaf 0xA: general count in EAX bits [15:8], fixed count in
 * EDX bits [3:0].
 * NOTE(review): the return-type line, braces and any non-x86 fallback
 * are missing from this view of the file.
 */
326 clib_get_pmu_counter_count (u8 *fixed, u8 *general)
328 #if defined(__x86_64__)
329 u32 __clib_unused eax = 0, ebx = 0, ecx = 0, edx = 0;
330 clib_get_cpuid (X86_CPU_ARCH_PERF_FUNC, &eax, &ebx, &ecx, &edx);
332 *general = (eax & 0xFF00) >> 8;
333 *fixed = (edx & 0xF);
/* Cached CPU identification record -- presumably populated in cpu.c;
 * confirm the definition site. */
350 const clib_cpu_info_t *clib_get_cpu_info ();
/*
 * aarch64 CPU identification values, matched against the
 * info->aarch64.implementer / part_num fields by the march priority
 * functions below (presumably sourced from MIDR_EL1 -- confirm in the
 * code that fills clib_cpu_info_t).
 */
/* ARM Ltd. */
353 #define AARCH64_CPU_IMPLEMENTER_ARM 0x41
354 #define AARCH64_CPU_PART_CORTEXA72 0xd08
355 #define AARCH64_CPU_PART_NEOVERSEN1 0xd0c
356 #define AARCH64_CPU_PART_NEOVERSEN2 0xd49
/* Cavium / Marvell */
359 #define AARCH64_CPU_IMPLEMENTER_CAVIUM 0x43
360 #define AARCH64_CPU_PART_THUNDERX2 0x0af
361 #define AARCH64_CPU_PART_OCTEONTX2T96 0x0b2
362 #define AARCH64_CPU_PART_OCTEONTX2T98 0x0b1
/* Qualcomm */
365 #define AARCH64_CPU_IMPLEMENTER_QUALCOMM 0x51
366 #define AARCH64_CPU_PART_QDF24XX 0xc00
/*
 * aarch64 march-variant priority functions.  Each matches the running
 * CPU's implementer and part number (from clib_get_cpu_info) against
 * the IDs defined above.
 * NOTE(review): return types, braces and the returned priority values
 * are missing from this view of the file -- only the signatures and the
 * identification checks remain visible.
 */
/* Octeon TX2: Cavium implementer, T96 or T98 part. */
369 clib_cpu_march_priority_octeontx2 ()
371 const clib_cpu_info_t *info = clib_get_cpu_info ();
373 if (!info || info->aarch64.implementer != AARCH64_CPU_IMPLEMENTER_CAVIUM)
376 if (info->aarch64.part_num == AARCH64_CPU_PART_OCTEONTX2T96 ||
377 info->aarch64.part_num == AARCH64_CPU_PART_OCTEONTX2T98)
/* ThunderX2 T99: Cavium implementer, ThunderX2 part. */
384 clib_cpu_march_priority_thunderx2t99 ()
386 const clib_cpu_info_t *info = clib_get_cpu_info ();
388 if (!info || info->aarch64.implementer != AARCH64_CPU_IMPLEMENTER_CAVIUM)
391 if (info->aarch64.part_num == AARCH64_CPU_PART_THUNDERX2)
/* Qualcomm Centriq 2400: Qualcomm implementer, QDF24xx part. */
398 clib_cpu_march_priority_qdf24xx ()
400 const clib_cpu_info_t *info = clib_get_cpu_info ();
402 if (!info || info->aarch64.implementer != AARCH64_CPU_IMPLEMENTER_QUALCOMM)
405 if (info->aarch64.part_num == AARCH64_CPU_PART_QDF24XX)
/* Cortex-A72: ARM implementer, A72 part. */
412 clib_cpu_march_priority_cortexa72 ()
414 const clib_cpu_info_t *info = clib_get_cpu_info ();
416 if (!info || info->aarch64.implementer != AARCH64_CPU_IMPLEMENTER_ARM)
419 if (info->aarch64.part_num == AARCH64_CPU_PART_CORTEXA72)
/* Neoverse N1: ARM implementer, N1 part. */
426 clib_cpu_march_priority_neoversen1 ()
428 const clib_cpu_info_t *info = clib_get_cpu_info ();
430 if (!info || info->aarch64.implementer != AARCH64_CPU_IMPLEMENTER_ARM)
433 if (info->aarch64.part_num == AARCH64_CPU_PART_NEOVERSEN1)
/* Neoverse N2: ARM implementer, N2 part. */
440 clib_cpu_march_priority_neoversen2 ()
442 const clib_cpu_info_t *info = clib_get_cpu_info ();
444 if (!info || info->aarch64.implementer != AARCH64_CPU_IMPLEMENTER_ARM)
447 if (info->aarch64.part_num == AARCH64_CPU_PART_NEOVERSEN2)
/*
 * CLIB_MARCH_FN_PRIORITY(): rank of the current translation unit's march
 * variant.  Variant builds call their clib_cpu_march_priority_<tag>();
 * the default build always ranks 0.
 * NOTE(review): the #else separating the two definitions appears to be
 * missing from this view of the file.
 */
453 #ifdef CLIB_MARCH_VARIANT
454 #define CLIB_MARCH_FN_PRIORITY() CLIB_MARCH_SFX(clib_cpu_march_priority)()
456 #define CLIB_MARCH_FN_PRIORITY() 0
/*
 * NOTE(review): per its comment this #endif closes the include guard
 * even though more definitions follow below, leaving them outside the
 * guard -- confirm this is intentional in the full source.
 */
458 #endif /* included_clib_cpu_h */
/*
 * Load-time selector: if this translation unit's march priority beats
 * the currently selected one, point fn##_selected at this variant's
 * implementation and record the new winning priority.
 * NOTE(review): the constructor body's braces (original lines 463, 465,
 * 468) are missing from this view of the file; comments cannot be placed
 * inside the '\'-continued body.
 */
460 #define CLIB_MARCH_FN_CONSTRUCTOR(fn) \
461 static void __clib_constructor \
462 CLIB_MARCH_SFX(fn ## _march_constructor) (void) \
464 if (CLIB_MARCH_FN_PRIORITY() > fn ## _selected_priority) \
466 fn ## _selected = & CLIB_MARCH_SFX (fn ## _ma); \
467 fn ## _selected_priority = CLIB_MARCH_FN_PRIORITY(); \
/*
 * CLIB_MARCH_FN(fn, rtype, args...) - define a multi-arch function body.
 * The default (non-variant) build defines the dispatch pointer
 * fn##_selected (initially the default implementation) and its priority;
 * march-variant builds declare both extern and emit a constructor
 * (CLIB_MARCH_FN_CONSTRUCTOR above) that competes for selection at load
 * time.  The macro ends on the function header so the caller supplies
 * the body.
 * NOTE(review): the #else between the two definitions and the closing
 * #endif appear to be missing from this view of the file.
 */
471 #ifndef CLIB_MARCH_VARIANT
472 #define CLIB_MARCH_FN(fn, rtype, _args...) \
473 static rtype CLIB_MARCH_SFX (fn##_ma) (_args); \
474 rtype (*fn##_selected) (_args) = &CLIB_MARCH_SFX (fn##_ma); \
475 int fn##_selected_priority = 0; \
476 static inline rtype CLIB_MARCH_SFX (fn##_ma) (_args)
478 #define CLIB_MARCH_FN(fn, rtype, _args...) \
479 static rtype CLIB_MARCH_SFX (fn##_ma) (_args); \
480 extern rtype (*fn##_selected) (_args); \
481 extern int fn##_selected_priority; \
482 CLIB_MARCH_FN_CONSTRUCTOR (fn) \
483 static rtype CLIB_MARCH_SFX (fn##_ma) (_args)
/* Call through the selected (best-priority) variant of fn. */
486 #define CLIB_MARCH_FN_SELECT(fn) (* fn ## _selected)
/* format() helpers for printing CPU micro-architecture, model name,
 * feature flags and the active march variant (defined elsewhere). */
488 format_function_t format_cpu_uarch;
489 format_function_t format_cpu_model_name;
490 format_function_t format_cpu_flags;
491 format_function_t format_march_variant;
494 * fd.io coding-style-patch-verification: ON
497 * eval: (c-set-style "gnu")