X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvppinfra%2Fcpu.h;h=bc4ee58b71698b4d52202fd538c00913074ceb83;hb=34c54dff5c66f9a4aef77daf08219301e15cc1fa;hp=d523e88da89c475f8189eea1381b7eb2642575c3;hpb=6459315569f49bcc2b3572f169192ded7323598f;p=vpp.git

diff --git a/src/vppinfra/cpu.h b/src/vppinfra/cpu.h
index d523e88da89..bc4ee58b716 100644
--- a/src/vppinfra/cpu.h
+++ b/src/vppinfra/cpu.h
@@ -19,20 +19,31 @@
 #include <sys/syscall.h>
 #include <vppinfra/format.h>
 
-/*
- * multiarchitecture support. Adding new entry will produce
- * new graph node function variant optimized for specific cpu
- * microarchitecture.
- * Order is important for runtime selection, as 1st match wins...
- */
-
-#if __x86_64__ && CLIB_DEBUG == 0
-#define foreach_march_variant(macro, x) \
-  macro(avx2, x, "arch=core-avx2")
+#if defined(__x86_64__)
+#define foreach_march_variant \
+  _ (hsw, "Intel Haswell") \
+  _ (trm, "Intel Tremont") \
+  _ (skx, "Intel Skylake (server) / Cascade Lake") \
+  _ (icl, "Intel Ice Lake")
+#elif defined(__aarch64__)
+#define foreach_march_variant \
+  _ (octeontx2, "Marvell Octeon TX2") \
+  _ (thunderx2t99, "Marvell ThunderX2 T99") \
+  _ (qdf24xx, "Qualcomm CentriqTM 2400") \
+  _ (cortexa72, "ARM Cortex-A72") \
+  _ (neoversen1, "ARM Neoverse N1")
 #else
-#define foreach_march_variant(macro, x)
+#define foreach_march_variant
 #endif
 
-
+typedef enum
+{
+  CLIB_MARCH_VARIANT_TYPE = 0,
+#define _(s, n) CLIB_MARCH_VARIANT_TYPE_##s,
+  foreach_march_variant
+#undef _
+  CLIB_MARCH_TYPE_N_VARIANTS
+} clib_march_variant_type_t;
+
 #if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
 #define CLIB_CPU_OPTIMIZED __attribute__ ((optimize ("O3")))
@@ -40,14 +51,6 @@
 #define CLIB_CPU_OPTIMIZED
 #endif
 
-
-#define CLIB_MULTIARCH_ARCH_CHECK(arch, fn, tgt) \
-  if (clib_cpu_supports_ ## arch()) \
-    return & fn ## _ ##arch;
-
-/* FIXME to be removed */
-#define CLIB_MULTIARCH_SELECT_FN(fn,...)
-
 #ifdef CLIB_MARCH_VARIANT
 #define __CLIB_MULTIARCH_FN(a,b) a##_##b
 #define _CLIB_MULTIARCH_FN(a,b) __CLIB_MULTIARCH_FN(a,b)
@@ -114,18 +117,30 @@ _CLIB_MARCH_FN_REGISTRATION(fn)
 clib_march_fn_registration *fn##_march_fn_registrations = 0; \
 _CLIB_MARCH_FN_REGISTRATION(fn)
 #endif
-#define foreach_x86_64_flags \
-_ (sse3, 1, ecx, 0) \
-_ (ssse3, 1, ecx, 9) \
-_ (sse41, 1, ecx, 19) \
-_ (sse42, 1, ecx, 20) \
-_ (avx, 1, ecx, 28) \
-_ (avx2, 7, ebx, 5) \
-_ (avx512f, 7, ebx, 16) \
-_ (x86_aes, 1, ecx, 25) \
-_ (sha, 7, ebx, 29) \
-_ (invariant_tsc, 0x80000007, edx, 8)
-
+#define foreach_x86_64_flags \
+  _ (sse3, 1, ecx, 0) \
+  _ (pclmulqdq, 1, ecx, 1) \
+  _ (ssse3, 1, ecx, 9) \
+  _ (sse41, 1, ecx, 19) \
+  _ (sse42, 1, ecx, 20) \
+  _ (avx, 1, ecx, 28) \
+  _ (rdrand, 1, ecx, 30) \
+  _ (avx2, 7, ebx, 5) \
+  _ (rtm, 7, ebx, 11) \
+  _ (pqm, 7, ebx, 12) \
+  _ (pqe, 7, ebx, 15) \
+  _ (avx512f, 7, ebx, 16) \
+  _ (rdseed, 7, ebx, 18) \
+  _ (x86_aes, 1, ecx, 25) \
+  _ (sha, 7, ebx, 29) \
+  _ (vaes, 7, ecx, 9) \
+  _ (vpclmulqdq, 7, ecx, 10) \
+  _ (avx512_vnni, 7, ecx, 11) \
+  _ (avx512_bitalg, 7, ecx, 12) \
+  _ (avx512_vpopcntdq, 7, ecx, 14) \
+  _ (movdiri, 7, ecx, 27) \
+  _ (movdir64b, 7, ecx, 28) \
+  _ (invariant_tsc, 0x80000007, edx, 8)
 
 #define foreach_aarch64_flags \
 _ (fp, 0) \
@@ -227,7 +242,7 @@ static inline int clib_cpu_supports_ ## flag() { return 0; }
 static inline int
 clib_cpu_supports_aes ()
 {
-#if defined (__aarch64__)
+#if defined(__x86_64__)
   return clib_cpu_supports_x86_aes ();
 #elif defined (__aarch64__)
   return clib_cpu_supports_aarch64_aes ();
@@ -237,15 +252,31 @@ clib_cpu_supports_aes ()
 }
 
 static inline int
-clib_cpu_march_priority_avx512 ()
+clib_cpu_march_priority_icl ()
+{
+  if (clib_cpu_supports_avx512_bitalg ())
+    return 200;
+  return -1;
+}
+
+static inline int
+clib_cpu_march_priority_skx ()
 {
   if (clib_cpu_supports_avx512f ())
-    return 20;
+    return 100;
   return -1;
 }
 
 static inline int
-clib_cpu_march_priority_avx2 ()
+clib_cpu_march_priority_trm ()
+{
+  if (clib_cpu_supports_movdiri ())
+    return 60;
+  return -1;
+}
+
+static inline int
+clib_cpu_march_priority_hsw ()
 {
   if (clib_cpu_supports_avx2 ())
     return 50;
@@ -308,18 +339,32 @@ clib_cpu_part ()
   return part;
 }
 
-#define AARCH64_CPU_IMPLEMENTER_THUNERDERX2 0x43
-#define AARCH64_CPU_PART_THUNERDERX2 0x0af
+#define AARCH64_CPU_IMPLEMENTER_CAVIUM 0x43
+#define AARCH64_CPU_PART_THUNDERX2 0x0af
+#define AARCH64_CPU_PART_OCTEONTX2T96 0x0b2
+#define AARCH64_CPU_PART_OCTEONTX2T98 0x0b1
 #define AARCH64_CPU_IMPLEMENTER_QDF24XX 0x51
 #define AARCH64_CPU_PART_QDF24XX 0xc00
 #define AARCH64_CPU_IMPLEMENTER_CORTEXA72 0x41
 #define AARCH64_CPU_PART_CORTEXA72 0xd08
+#define AARCH64_CPU_IMPLEMENTER_NEOVERSEN1 0x41
+#define AARCH64_CPU_PART_NEOVERSEN1 0xd0c
+
+static inline int
+clib_cpu_march_priority_octeontx2 ()
+{
+  if ((AARCH64_CPU_IMPLEMENTER_CAVIUM == clib_cpu_implementer ()) &&
+      ((AARCH64_CPU_PART_OCTEONTX2T96 == clib_cpu_part ())
+       || AARCH64_CPU_PART_OCTEONTX2T98 == clib_cpu_part ()))
+    return 20;
+  return -1;
+}
 
 static inline int
 clib_cpu_march_priority_thunderx2t99 ()
 {
-  if ((AARCH64_CPU_IMPLEMENTER_THUNERDERX2 == clib_cpu_implementer ()) &&
-      (AARCH64_CPU_PART_THUNERDERX2 == clib_cpu_part ()))
+  if ((AARCH64_CPU_IMPLEMENTER_CAVIUM == clib_cpu_implementer ()) &&
+      (AARCH64_CPU_PART_THUNDERX2 == clib_cpu_part ()))
     return 20;
   return -1;
 }
@@ -342,6 +387,15 @@ clib_cpu_march_priority_cortexa72 ()
   return -1;
 }
 
+static inline int
+clib_cpu_march_priority_neoversen1 ()
+{
+  if ((AARCH64_CPU_IMPLEMENTER_NEOVERSEN1 == clib_cpu_implementer ()) &&
+      (AARCH64_CPU_PART_NEOVERSEN1 == clib_cpu_part ()))
+    return 10;
+  return -1;
+}
+
 #ifdef CLIB_MARCH_VARIANT
 #define CLIB_MARCH_FN_PRIORITY() CLIB_MARCH_SFX(clib_cpu_march_priority)()
 #else
@@ -370,7 +424,7 @@ CLIB_MARCH_SFX(fn ## _march_constructor) (void) \
 #else
 #define CLIB_MARCH_FN(fn, rtype, _args...) \
   static rtype CLIB_CPU_OPTIMIZED CLIB_MARCH_SFX (fn ## _ma)(_args); \
-  extern int (*fn ## _selected) (_args); \
+  extern rtype (*fn ## _selected) (_args); \
   extern int fn ## _selected_priority; \
   CLIB_MARCH_FN_CONSTRUCTOR (fn) \
   static rtype CLIB_CPU_OPTIMIZED CLIB_MARCH_SFX (fn ## _ma)(_args)
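
The march-variant machinery this diff touches follows one convention: each clib_cpu_march_priority_<variant>() helper returns -1 when the required CPU features are missing and a positive priority when they are present (icl 200, skx 100, trm 60, hsw 50 on x86_64), and the CLIB_MARCH_FN constructors keep whichever registered variant reports the highest priority. The stand-alone sketch below only illustrates that convention outside of VPP; the names fake_cpu_has_*, priority_*, my_fn_* and variant_t are invented for the example and are not part of cpu.h.

#include <stdio.h>

/* Stand-ins for clib_cpu_supports_avx512f()/clib_cpu_supports_avx2();
 * hard-coded so the example is self-contained. */
static int fake_cpu_has_avx512f = 0;
static int fake_cpu_has_avx2 = 1;

/* Mirrors clib_cpu_march_priority_skx(): usable only with AVX-512F. */
static int
priority_skx (void)
{
  return fake_cpu_has_avx512f ? 100 : -1;
}

/* Mirrors clib_cpu_march_priority_hsw(): usable only with AVX2. */
static int
priority_hsw (void)
{
  return fake_cpu_has_avx2 ? 50 : -1;
}

/* Hypothetical per-march implementations of the same function. */
static const char *my_fn_skx (void) { return "skx (AVX-512) variant"; }
static const char *my_fn_hsw (void) { return "hsw (AVX2) variant"; }
static const char *my_fn_generic (void) { return "generic variant"; }

typedef struct
{
  int (*priority) (void);
  const char *(*fn) (void);
} variant_t;

int
main (void)
{
  /* Highest reported priority wins, as in the CLIB_MARCH_FN constructors. */
  variant_t variants[] = {
    { priority_hsw, my_fn_hsw },
    { priority_skx, my_fn_skx },
  };
  const char *(*selected) (void) = my_fn_generic;
  int best = -1;
  unsigned i;

  for (i = 0; i < sizeof (variants) / sizeof (variants[0]); i++)
    if (variants[i].priority () > best)
      {
	best = variants[i].priority ();
	selected = variants[i].fn;
      }

  printf ("selected: %s (priority %d)\n", selected (), best);
  return 0;
}

With the flags above, the AVX2 path wins at priority 50; setting fake_cpu_has_avx512f to 1 would make the skx variant win at 100, matching the icl > skx > trm > hsw ordering the diff establishes.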