/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef included_clib_cpu_h
#define included_clib_cpu_h

#include <sys/syscall.h>
#include <vppinfra/format.h>
/*
 * Per-architecture list of multi-arch (march) build variants:
 * _ (short-name, human-readable description).
 * Unknown architectures get an empty list so dependent macros still expand.
 */
#if defined(__x86_64__)
#define foreach_march_variant                                                 \
  _ (hsw, "Intel Haswell")                                                    \
  _ (trm, "Intel Tremont")                                                    \
  _ (skx, "Intel Skylake (server) / Cascade Lake")                            \
  _ (icl, "Intel Ice Lake")
#elif defined(__aarch64__)
#define foreach_march_variant                                                 \
  _ (octeontx2, "Marvell Octeon TX2")                                         \
  _ (thunderx2t99, "Marvell ThunderX2 T99")                                   \
  _ (qdf24xx, "Qualcomm CentriqTM 2400")                                      \
  _ (cortexa72, "ARM Cortex-A72")                                             \
  _ (neoversen1, "ARM Neoverse N1")
#else
#define foreach_march_variant
#endif
41 CLIB_MARCH_VARIANT_TYPE = 0,
42 #define _(s, n) CLIB_MARCH_VARIANT_TYPE_##s,
45 CLIB_MARCH_TYPE_N_VARIANTS
46 } clib_march_variant_type_t;
/*
 * CLIB_CPU_OPTIMIZED forces -O3 on march-variant functions for release
 * builds with GCC; it is a no-op for clang or debug builds.
 */
#if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
#define CLIB_CPU_OPTIMIZED __attribute__ ((optimize ("O3")))
#else
#define CLIB_CPU_OPTIMIZED
#endif

/*
 * CLIB_MULTIARCH_FN(fn) expands to fn_<variant> when compiling a march
 * variant translation unit (CLIB_MARCH_VARIANT defined on the command
 * line), otherwise to plain fn.  The two-level helper is needed so that
 * CLIB_MARCH_VARIANT is macro-expanded before token pasting.
 */
#ifdef CLIB_MARCH_VARIANT
#define __CLIB_MULTIARCH_FN(a,b) a##_##b
#define _CLIB_MULTIARCH_FN(a,b) __CLIB_MULTIARCH_FN(a,b)
#define CLIB_MULTIARCH_FN(fn) _CLIB_MULTIARCH_FN(fn,CLIB_MARCH_VARIANT)
#else
#define CLIB_MULTIARCH_FN(fn) fn
#endif

#define CLIB_MARCH_SFX CLIB_MULTIARCH_FN
/*
 * One node of the per-function singly-linked list of march-variant
 * implementations.  Fields are exactly those referenced by
 * _CLIB_MARCH_FN_REGISTRATION below (.name, ->priority, ->next,
 * ->function).
 */
typedef struct _clib_march_fn_registration
{
  void *function;		/* variant entry point */
  int priority;			/* from CLIB_MARCH_FN_PRIORITY(); higher wins */
  struct _clib_march_fn_registration *next;
  char *name;			/* variant name (CLIB_MARCH_VARIANT_STR) */
} clib_march_fn_registration;
72 static_always_inline void *
73 clib_march_select_fn_ptr (clib_march_fn_registration * r)
80 if (last_prio < r->priority)
82 last_prio = r->priority;
/*
 * Resolve the best registered variant of fn at runtime.
 * NOTE(review): the trailing ';' in the expansion means callers must use
 * this where a statement-terminating semicolon is acceptable; upstream
 * later dropped it -- keep as-is for caller compatibility.
 */
#define CLIB_MARCH_FN_POINTER(fn) \
  clib_march_select_fn_ptr (fn##_march_fn_registrations);

/*
 * Emit a static registration record plus a constructor which links it
 * (with its runtime-probed priority) onto fn's registration list.
 */
#define _CLIB_MARCH_FN_REGISTRATION(fn) \
static clib_march_fn_registration \
CLIB_MARCH_SFX(fn##_march_fn_registration) = \
{ \
  .name = CLIB_MARCH_VARIANT_STR \
}; \
\
static void __clib_constructor \
fn##_march_register () \
{ \
  clib_march_fn_registration *r; \
  r = & CLIB_MARCH_SFX (fn##_march_fn_registration); \
  r->priority = CLIB_MARCH_FN_PRIORITY(); \
  r->next = fn##_march_fn_registrations; \
  r->function = CLIB_MARCH_SFX (fn); \
  fn##_march_fn_registrations = r; \
}

/*
 * Variant TUs reference the list head declared by the default TU;
 * the default (non-variant) TU defines it.
 */
#ifdef CLIB_MARCH_VARIANT
#define CLIB_MARCH_FN_REGISTRATION(fn) \
extern clib_march_fn_registration *fn##_march_fn_registrations; \
_CLIB_MARCH_FN_REGISTRATION(fn)
#else
#define CLIB_MARCH_FN_REGISTRATION(fn) \
clib_march_fn_registration *fn##_march_fn_registrations = 0; \
_CLIB_MARCH_FN_REGISTRATION(fn)
#endif
/*
 * x86_64 CPU feature flags: _ (flag, cpuid leaf, result register, bit).
 * "aes" is prefixed x86_ to avoid colliding with the aarch64 flag of the
 * same name (see clib_cpu_supports_aes below).
 */
#define foreach_x86_64_flags \
_ (sse3, 1, ecx, 0) \
_ (pclmulqdq, 1, ecx, 1) \
_ (ssse3, 1, ecx, 9) \
_ (sse41, 1, ecx, 19) \
_ (sse42, 1, ecx, 20) \
_ (avx, 1, ecx, 28) \
_ (rdrand, 1, ecx, 30) \
_ (avx2, 7, ebx, 5) \
_ (rtm, 7, ebx, 11) \
_ (pqm, 7, ebx, 12) \
_ (pqe, 7, ebx, 15) \
_ (avx512f, 7, ebx, 16) \
_ (rdseed, 7, ebx, 18) \
_ (x86_aes, 1, ecx, 25) \
_ (sha, 7, ebx, 29) \
_ (vaes, 7, ecx, 9) \
_ (vpclmulqdq, 7, ecx, 10) \
_ (avx512_vnni, 7, ecx, 11) \
_ (avx512_bitalg, 7, ecx, 12) \
_ (avx512_vpopcntdq, 7, ecx, 14) \
_ (movdiri, 7, ecx, 27) \
_ (movdir64b, 7, ecx, 28) \
_ (invariant_tsc, 0x80000007, edx, 8)
/*
 * AArch64 CPU feature flags: _ (flag, HWCAP bit).  Bit numbers follow the
 * Linux arm64 ELF HWCAP ABI.
 * NOTE(review): the continuation lines of this table were lost in
 * extraction; restored from upstream vppinfra -- confirm against the tree.
 */
#define foreach_aarch64_flags \
_ (fp,          0) \
_ (asimd,       1) \
_ (evtstrm,     2) \
_ (aarch64_aes, 3) \
_ (pmull,       4) \
_ (sha1,        5) \
_ (sha2,        6) \
_ (crc32,       7) \
_ (atomics,     8) \
_ (fphp,        9) \
_ (asimdhp,    10) \
_ (cpuid,      11) \
_ (asimdrdm,   12) \
_ (jscvt,      13) \
_ (fcma,       14) \
_ (lrcpc,      15) \
_ (dcpop,      16) \
_ (sha3,       17) \
_ (sm3,        18) \
_ (sm4,        19) \
_ (asimddp,    20) \
_ (sha512,     21) \
_ (sve,        22)
/*
 * Return the CPU id the calling thread is currently running on, via the
 * getcpu syscall.  Locals are zero-initialized so a failing syscall
 * yields 0 rather than an indeterminate value.
 */
static inline int
clib_get_current_cpu_id ()
{
  unsigned cpu = 0, node = 0;
  syscall (__NR_getcpu, &cpu, &node, 0);
  return cpu;
}
/*
 * Return the NUMA node of the CPU the calling thread is currently
 * running on, via the getcpu syscall.  Locals are zero-initialized so a
 * failing syscall yields 0 rather than an indeterminate value.
 */
static inline int
clib_get_current_numa_node ()
{
  unsigned cpu = 0, node = 0;
  syscall (__NR_getcpu, &cpu, &node, 0);
  return node;
}
186 #if defined(__x86_64__)
190 clib_get_cpuid (const u32 lev, u32 * eax, u32 * ebx, u32 * ecx, u32 * edx)
192 if ((u32) __get_cpuid_max (0x80000000 & lev, 0) < lev)
195 __cpuid_count (lev, 0, *eax, *ebx, *ecx, *edx);
197 __cpuid (lev, *eax, *ebx, *ecx, *edx);
202 #define _(flag, func, reg, bit) \
204 clib_cpu_supports_ ## flag() \
206 u32 __attribute__((unused)) eax, ebx = 0, ecx = 0, edx = 0; \
207 clib_get_cpuid (func, &eax, &ebx, &ecx, &edx); \
209 return ((reg & (1 << bit)) != 0); \
213 #else /* __x86_64__ */
215 #define _(flag, func, reg, bit) \
216 static inline int clib_cpu_supports_ ## flag() { return 0; }
219 #endif /* __x86_64__ */
220 #if defined(__aarch64__)
221 #include <sys/auxv.h>
222 #define _(flag, bit) \
224 clib_cpu_supports_ ## flag() \
226 unsigned long hwcap = getauxval(AT_HWCAP); \
227 return (hwcap & (1 << bit)); \
229 foreach_aarch64_flags
231 #else /* ! __x86_64__ && !__aarch64__ */
232 #define _(flag, bit) \
233 static inline int clib_cpu_supports_ ## flag() { return 0; }
234 foreach_aarch64_flags
236 #endif /* __x86_64__, __aarch64__ */
/*
 * aes is the only feature with the same name in both flag lists
 * handle this by prefixing it with the arch name, and handling it
 * with the custom function below
 */
static inline int
clib_cpu_supports_aes ()
{
#if defined(__x86_64__)
  return clib_cpu_supports_x86_aes ();
#elif defined (__aarch64__)
  return clib_cpu_supports_aarch64_aes ();
#else
  return 0;
#endif
}
/*
 * x86_64 march-variant selection priorities: each probe returns a
 * positive priority when the distinguishing ISA feature is present,
 * otherwise -1 (variant unusable on this CPU).  Higher value wins.
 * NOTE(review): the numeric priorities were lost in extraction and are
 * restored from upstream vppinfra (icl=200, skx=100, trm=60, hsw=50) --
 * confirm against the project tree.
 */
static inline int
clib_cpu_march_priority_icl ()
{
  if (clib_cpu_supports_avx512_bitalg ())
    return 200;
  return -1;
}

static inline int
clib_cpu_march_priority_skx ()
{
  if (clib_cpu_supports_avx512f ())
    return 100;
  return -1;
}

static inline int
clib_cpu_march_priority_trm ()
{
  if (clib_cpu_supports_movdiri ())
    return 60;
  return -1;
}

static inline int
clib_cpu_march_priority_hsw ()
{
  if (clib_cpu_supports_avx2 ())
    return 50;
  return -1;
}
287 clib_cpu_implementer ()
290 static u32 implementer = -1;
292 if (-1 != implementer)
295 FILE *fp = fopen ("/proc/cpuinfo", "r");
301 if (!fgets (buf, sizeof (buf), fp))
304 if (strstr (buf, "CPU implementer"))
305 implementer = (u32) strtol (memchr (buf, ':', 128) + 2, NULL, 0);
306 if (-1 != implementer)
318 static u32 part = -1;
323 FILE *fp = fopen ("/proc/cpuinfo", "r");
329 if (!fgets (buf, sizeof (buf), fp))
332 if (strstr (buf, "CPU part"))
333 part = (u32) strtol (memchr (buf, ':', 128) + 2, NULL, 0);
/*
 * AArch64 MIDR implementer/part ids as reported by /proc/cpuinfo,
 * matched against clib_cpu_implementer()/clib_cpu_part() below.
 */
#define AARCH64_CPU_IMPLEMENTER_CAVIUM 0x43
#define AARCH64_CPU_PART_THUNDERX2 0x0af
#define AARCH64_CPU_PART_OCTEONTX2T96 0x0b2
#define AARCH64_CPU_PART_OCTEONTX2T98 0x0b1
#define AARCH64_CPU_IMPLEMENTER_QDF24XX 0x51
#define AARCH64_CPU_PART_QDF24XX 0xc00
#define AARCH64_CPU_IMPLEMENTER_CORTEXA72 0x41
#define AARCH64_CPU_PART_CORTEXA72 0xd08
#define AARCH64_CPU_IMPLEMENTER_NEOVERSEN1 0x41
#define AARCH64_CPU_PART_NEOVERSEN1 0xd0c
354 clib_cpu_march_priority_octeontx2 ()
356 if ((AARCH64_CPU_IMPLEMENTER_CAVIUM == clib_cpu_implementer ()) &&
357 ((AARCH64_CPU_PART_OCTEONTX2T96 == clib_cpu_part ())
358 || AARCH64_CPU_PART_OCTEONTX2T98 == clib_cpu_part ()))
364 clib_cpu_march_priority_thunderx2t99 ()
366 if ((AARCH64_CPU_IMPLEMENTER_CAVIUM == clib_cpu_implementer ()) &&
367 (AARCH64_CPU_PART_THUNDERX2 == clib_cpu_part ()))
373 clib_cpu_march_priority_qdf24xx ()
375 if ((AARCH64_CPU_IMPLEMENTER_QDF24XX == clib_cpu_implementer ()) &&
376 (AARCH64_CPU_PART_QDF24XX == clib_cpu_part ()))
382 clib_cpu_march_priority_cortexa72 ()
384 if ((AARCH64_CPU_IMPLEMENTER_CORTEXA72 == clib_cpu_implementer ()) &&
385 (AARCH64_CPU_PART_CORTEXA72 == clib_cpu_part ()))
391 clib_cpu_march_priority_neoversen1 ()
393 if ((AARCH64_CPU_IMPLEMENTER_NEOVERSEN1 == clib_cpu_implementer ()) &&
394 (AARCH64_CPU_PART_NEOVERSEN1 == clib_cpu_part ()))
399 #ifdef CLIB_MARCH_VARIANT
400 #define CLIB_MARCH_FN_PRIORITY() CLIB_MARCH_SFX(clib_cpu_march_priority)()
402 #define CLIB_MARCH_FN_PRIORITY() 0
404 #endif /* included_clib_cpu_h */
/*
 * Constructor emitted per variant: at load time, promote this variant's
 * implementation to fn##_selected if its runtime priority beats the
 * currently selected one.
 */
#define CLIB_MARCH_FN_CONSTRUCTOR(fn) \
static void __clib_constructor \
CLIB_MARCH_SFX(fn ## _march_constructor) (void) \
{ \
  if (CLIB_MARCH_FN_PRIORITY() > fn ## _selected_priority) \
    { \
      fn ## _selected = & CLIB_MARCH_SFX (fn ## _ma); \
      fn ## _selected_priority = CLIB_MARCH_FN_PRIORITY(); \
    } \
}

/*
 * CLIB_MARCH_FN(fn, rtype, args...) declares a multi-arch function body.
 * The default TU defines the dispatch pointer fn##_selected (initially
 * the generic implementation, priority 0); each variant TU references it
 * and registers a constructor that may override it at load time.
 */
#ifndef CLIB_MARCH_VARIANT
#define CLIB_MARCH_FN(fn, rtype, _args...) \
  static rtype CLIB_CPU_OPTIMIZED CLIB_MARCH_SFX (fn ## _ma)(_args); \
  rtype (*fn ## _selected) (_args) = & CLIB_MARCH_SFX (fn ## _ma); \
  int fn ## _selected_priority = 0; \
  static inline rtype CLIB_CPU_OPTIMIZED \
  CLIB_MARCH_SFX (fn ## _ma)(_args)
#else
#define CLIB_MARCH_FN(fn, rtype, _args...) \
  static rtype CLIB_CPU_OPTIMIZED CLIB_MARCH_SFX (fn ## _ma)(_args); \
  extern rtype (*fn ## _selected) (_args); \
  extern int fn ## _selected_priority; \
  CLIB_MARCH_FN_CONSTRUCTOR (fn) \
  static rtype CLIB_CPU_OPTIMIZED CLIB_MARCH_SFX (fn ## _ma)(_args)
#endif
433 #define CLIB_MARCH_FN_SELECT(fn) (* fn ## _selected)
435 format_function_t format_cpu_uarch;
436 format_function_t format_cpu_model_name;
437 format_function_t format_cpu_flags;
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */