#ifndef included_clib_cpu_h
#define included_clib_cpu_h
+#include <sys/syscall.h>
#include <vppinfra/format.h>
/*
 * Multiarchitecture support.  Each entry below produces an additional
 * per-function variant compiled for the named CPU microarchitecture.
 * Runtime selection is driven by the clib_cpu_march_priority_*()
 * helpers further down in this header (higher priority wins).
 */
#if defined(__x86_64__)
#define foreach_march_variant \
  _ (hsw, "Intel Haswell") \
  _ (trm, "Intel Tremont") \
  _ (skx, "Intel Skylake (server) / Cascade Lake") \
  _ (icl, "Intel Ice Lake")
#elif defined(__aarch64__)
#define foreach_march_variant \
  _ (octeontx2, "Marvell Octeon TX2") \
  _ (thunderx2t99, "Marvell ThunderX2 T99") \
  _ (qdf24xx, "Qualcomm CentriqTM 2400") \
  _ (cortexa72, "ARM Cortex-A72") \
  _ (neoversen1, "ARM Neoverse N1")
#else
/* no additional variants on other architectures */
#define foreach_march_variant
#endif
+typedef enum
+{
+ CLIB_MARCH_VARIANT_TYPE = 0,
+#define _(s, n) CLIB_MARCH_VARIANT_TYPE_##s,
+ foreach_march_variant
+#undef _
+ CLIB_MARCH_TYPE_N_VARIANTS
+} clib_march_variant_type_t;
/*
 * When compiling a march variant (CLIB_MARCH_VARIANT defined by the
 * build system), CLIB_MULTIARCH_FN(fn) expands to fn_<variant> so every
 * variant object file exports uniquely named symbols.  In the default
 * build it expands to plain fn.
 */
#ifdef CLIB_MARCH_VARIANT
#define __CLIB_MULTIARCH_FN(a,b) a##_##b
#define _CLIB_MULTIARCH_FN(a,b) __CLIB_MULTIARCH_FN(a,b)
#define CLIB_MULTIARCH_FN(fn) _CLIB_MULTIARCH_FN(fn,CLIB_MARCH_VARIANT)
#else
#define CLIB_MULTIARCH_FN(fn) fn
#endif

/* preferred spelling for suffixing arbitrary identifiers */
#define CLIB_MARCH_SFX CLIB_MULTIARCH_FN
/* Per-variant registry node: each march variant of a function links one
 * of these into a singly-linked list so the best implementation can be
 * selected at runtime by priority. */
typedef struct _clib_march_fn_registration
{
  void *function;		/* variant entry point */
  int priority;			/* runtime priority; -1 = CPU unsupported */
  struct _clib_march_fn_registration *next;	/* next registered variant */
  char *name;			/* variant name string */
} clib_march_fn_registration;
-#define CLIB_MULTIARCH_SELECT_FN(fn,...) \
- __VA_ARGS__ void * fn ## _multiarch_select(void) \
-{ \
- foreach_march_variant(CLIB_MULTIARCH_ARCH_CHECK, fn) \
- return & fn; \
+static_always_inline void *
+clib_march_select_fn_ptr (clib_march_fn_registration * r)
+{
+ void *rv = 0;
+ int last_prio = -1;
+
+ while (r)
+ {
+ if (last_prio < r->priority)
+ {
+ last_prio = r->priority;
+ rv = r->function;
+ }
+ r = r->next;
+ }
+ return rv;
}
/* Resolve the best runtime variant of fn from its registration list.
 * Result is cast to fn's own function-pointer type, and there is
 * deliberately no trailing semicolon, so the macro can be used inside
 * expressions and initializers (the original expansion ended in ';',
 * which broke such uses and yielded an untyped void *). */
#define CLIB_MARCH_FN_POINTER(fn) \
  (__typeof__ (fn) *) clib_march_select_fn_ptr (fn##_march_fn_registrations)
/* Internal: emit one registration record for fn in the current march
 * variant plus a load-time constructor that links it into fn's list.
 * NOTE(review): CLIB_MARCH_VARIANT_STR is expected to be supplied by
 * the build system — confirm against the CMake/Make rules. */
#define _CLIB_MARCH_FN_REGISTRATION(fn) \
static clib_march_fn_registration \
CLIB_MARCH_SFX(fn##_march_fn_registration) = \
{ \
  .name = CLIB_MARCH_VARIANT_STR \
}; \
\
static void __clib_constructor \
fn##_march_register () \
{ \
  clib_march_fn_registration *r; \
  r = & CLIB_MARCH_SFX (fn##_march_fn_registration); \
  r->priority = CLIB_MARCH_FN_PRIORITY(); \
  r->next = fn##_march_fn_registrations; \
  r->function = CLIB_MARCH_SFX (fn); \
  fn##_march_fn_registrations = r; \
}

/* Public: the default build defines the list head; variant builds only
 * reference it (extern) and prepend their own record. */
#ifdef CLIB_MARCH_VARIANT
#define CLIB_MARCH_FN_REGISTRATION(fn) \
extern clib_march_fn_registration *fn##_march_fn_registrations; \
_CLIB_MARCH_FN_REGISTRATION(fn)
#else
#define CLIB_MARCH_FN_REGISTRATION(fn) \
clib_march_fn_registration *fn##_march_fn_registrations = 0; \
_CLIB_MARCH_FN_REGISTRATION(fn)
#endif
/* x86_64 CPUID feature flags: (name, cpuid leaf, register, bit) */
#define foreach_x86_64_flags \
  _ (sse3, 1, ecx, 0) \
  _ (pclmulqdq, 1, ecx, 1) \
  _ (ssse3, 1, ecx, 9) \
  _ (sse41, 1, ecx, 19) \
  _ (sse42, 1, ecx, 20) \
  _ (avx, 1, ecx, 28) \
  _ (rdrand, 1, ecx, 30) \
  _ (avx2, 7, ebx, 5) \
  _ (rtm, 7, ebx, 11) \
  _ (pqm, 7, ebx, 12) \
  _ (pqe, 7, ebx, 15) \
  _ (avx512f, 7, ebx, 16) \
  _ (rdseed, 7, ebx, 18) \
  _ (x86_aes, 1, ecx, 25) \
  _ (sha, 7, ebx, 29) \
  _ (vaes, 7, ecx, 9) \
  _ (vpclmulqdq, 7, ecx, 10) \
  _ (avx512_vnni, 7, ecx, 11) \
  _ (avx512_bitalg, 7, ecx, 12) \
  _ (avx512_vpopcntdq, 7, ecx, 14) \
  _ (movdiri, 7, ecx, 27) \
  _ (movdir64b, 7, ecx, 28) \
  _ (invariant_tsc, 0x80000007, edx, 8)
/* aarch64 CPU feature flags: (name, feature bit) — presumably the
 * kernel's hwcap bit numbers; TODO(review) confirm against asm/hwcap.h */
#define foreach_aarch64_flags \
_ (fp, 0) \
_ (sha512, 21) \
_ (sve, 22)
+u32 clib_get_current_cpu_id ();
+u32 clib_get_current_numa_node ();
+
#if defined(__x86_64__)
#include "cpuid.h"
return 1;
}
/* Signature shared by all clib_cpu_supports_*() predicates. */
typedef int (*clib_cpu_supports_func_t) ();
#define _(flag, func, reg, bit) \
static inline int \
/* Architecture-neutral AES capability test: dispatches to the
 * arch-specific flag predicate.  Returns 0 on architectures with
 * neither flag (the original fell off the end — undefined behavior
 * when the result was used). */
static inline int
clib_cpu_supports_aes ()
{
#if defined(__x86_64__)
  return clib_cpu_supports_x86_aes ();
#elif defined (__aarch64__)
  return clib_cpu_supports_aarch64_aes ();
#else
  return 0;
#endif
}
/* March priority: Ice Lake variant.  AVX512-BITALG support is used here
 * as the Ice Lake discriminator.  Higher value wins; -1 means this CPU
 * cannot run the variant. */
static inline int
clib_cpu_march_priority_icl ()
{
  if (clib_cpu_supports_avx512_bitalg ())
    return 200;
  return -1;
}

/* March priority: Skylake-server / Cascade Lake variant, keyed off
 * AVX-512 Foundation support.  -1 = unsupported on this CPU. */
static inline int
clib_cpu_march_priority_skx ()
{
  if (clib_cpu_supports_avx512f ())
    return 100;
  return -1;
}

/* March priority: Tremont variant, keyed off MOVDIRI support.
 * -1 = unsupported on this CPU. */
static inline int
clib_cpu_march_priority_trm ()
{
  if (clib_cpu_supports_movdiri ())
    return 60;
  return -1;
}

/* March priority: Haswell variant, keyed off AVX2 support.  Lowest of
 * the x86 variants, so it acts as the fallback for any AVX2 CPU.
 * -1 = unsupported. */
static inline int
clib_cpu_march_priority_hsw ()
{
  if (clib_cpu_supports_avx2 ())
    return 50;
  return -1;
}

+static inline u32
+clib_cpu_implementer ()
+{
+ char buf[128];
+ static u32 implementer = -1;
+
+ if (-1 != implementer)
+ return implementer;
+
+ FILE *fp = fopen ("/proc/cpuinfo", "r");
+ if (!fp)
+ return implementer;
+
+ while (!feof (fp))
+ {
+ if (!fgets (buf, sizeof (buf), fp))
+ break;
+ buf[127] = '\0';
+ if (strstr (buf, "CPU implementer"))
+ implementer = (u32) strtol (memchr (buf, ':', 128) + 2, NULL, 0);
+ if (-1 != implementer)
+ break;
+ }
+ fclose (fp);
+
+ return implementer;
+}
+
+static inline u32
+clib_cpu_part ()
+{
+ char buf[128];
+ static u32 part = -1;
+
+ if (-1 != part)
+ return part;
+
+ FILE *fp = fopen ("/proc/cpuinfo", "r");
+ if (!fp)
+ return part;
+
+ while (!feof (fp))
+ {
+ if (!fgets (buf, sizeof (buf), fp))
+ break;
+ buf[127] = '\0';
+ if (strstr (buf, "CPU part"))
+ part = (u32) strtol (memchr (buf, ':', 128) + 2, NULL, 0);
+ if (-1 != part)
+ break;
+ }
+ fclose (fp);
+
+ return part;
+}
+
/* aarch64 implementer / part codes as reported by the /proc/cpuinfo
 * "CPU implementer" and "CPU part" fields parsed above. */
#define AARCH64_CPU_IMPLEMENTER_CAVIUM 0x43
#define AARCH64_CPU_PART_THUNDERX2 0x0af
#define AARCH64_CPU_PART_OCTEONTX2T96 0x0b2
#define AARCH64_CPU_PART_OCTEONTX2T98 0x0b1
#define AARCH64_CPU_IMPLEMENTER_QDF24XX 0x51
#define AARCH64_CPU_PART_QDF24XX 0xc00
#define AARCH64_CPU_IMPLEMENTER_CORTEXA72 0x41
#define AARCH64_CPU_PART_CORTEXA72 0xd08
#define AARCH64_CPU_IMPLEMENTER_NEOVERSEN1 0x41
#define AARCH64_CPU_PART_NEOVERSEN1 0xd0c

+static inline int
+clib_cpu_march_priority_octeontx2 ()
+{
+ if ((AARCH64_CPU_IMPLEMENTER_CAVIUM == clib_cpu_implementer ()) &&
+ ((AARCH64_CPU_PART_OCTEONTX2T96 == clib_cpu_part ())
+ || AARCH64_CPU_PART_OCTEONTX2T98 == clib_cpu_part ()))
+ return 20;
+ return -1;
+}
+
+static inline int
+clib_cpu_march_priority_thunderx2t99 ()
+{
+ if ((AARCH64_CPU_IMPLEMENTER_CAVIUM == clib_cpu_implementer ()) &&
+ (AARCH64_CPU_PART_THUNDERX2 == clib_cpu_part ()))
+ return 20;
+ return -1;
+}
+
+static inline int
+clib_cpu_march_priority_qdf24xx ()
+{
+ if ((AARCH64_CPU_IMPLEMENTER_QDF24XX == clib_cpu_implementer ()) &&
+ (AARCH64_CPU_PART_QDF24XX == clib_cpu_part ()))
+ return 20;
+ return -1;
+}
+
+static inline int
+clib_cpu_march_priority_cortexa72 ()
+{
+ if ((AARCH64_CPU_IMPLEMENTER_CORTEXA72 == clib_cpu_implementer ()) &&
+ (AARCH64_CPU_PART_CORTEXA72 == clib_cpu_part ()))
+ return 10;
+ return -1;
+}
+
+static inline int
+clib_cpu_march_priority_neoversen1 ()
+{
+ if ((AARCH64_CPU_IMPLEMENTER_NEOVERSEN1 == clib_cpu_implementer ()) &&
+ (AARCH64_CPU_PART_NEOVERSEN1 == clib_cpu_part ()))
+ return 10;
+ return -1;
+}
+
/* Priority of the variant currently being compiled: the per-variant
 * clib_cpu_march_priority_<variant>() in march builds, 0 (baseline)
 * in the default build. */
#ifdef CLIB_MARCH_VARIANT
#define CLIB_MARCH_FN_PRIORITY() CLIB_MARCH_SFX(clib_cpu_march_priority)()
#else
#define CLIB_MARCH_FN_PRIORITY() 0
#endif
#endif /* included_clib_cpu_h */
/* Load-time constructor: if this variant's priority beats the currently
 * selected implementation of fn, take over fn's dispatch pointer.
 * (The original ended with a stray trailing backslash after the closing
 * brace, silently gluing the following source line into the macro;
 * removed.) */
#define CLIB_MARCH_FN_CONSTRUCTOR(fn) \
static void __clib_constructor \
CLIB_MARCH_SFX(fn ## _march_constructor) (void) \
{ \
  if (CLIB_MARCH_FN_PRIORITY() > fn ## _selected_priority) \
    { \
      fn ## _selected = & CLIB_MARCH_SFX (fn ## _ma); \
      fn ## _selected_priority = CLIB_MARCH_FN_PRIORITY(); \
    } \
}

/* Define a multiarch function body.  The default build also emits the
 * dispatch pointer (fn_selected, initially the baseline implementation)
 * and its priority; variant builds reference those externs and install
 * themselves via CLIB_MARCH_FN_CONSTRUCTOR when their priority is
 * higher.  Call through CLIB_MARCH_FN_SELECT(fn)(args). */
#ifndef CLIB_MARCH_VARIANT
#define CLIB_MARCH_FN(fn, rtype, _args...) \
  static rtype CLIB_MARCH_SFX (fn##_ma) (_args); \
  rtype (*fn##_selected) (_args) = &CLIB_MARCH_SFX (fn##_ma); \
  int fn##_selected_priority = 0; \
  static inline rtype CLIB_MARCH_SFX (fn##_ma) (_args)
#else
#define CLIB_MARCH_FN(fn, rtype, _args...) \
  static rtype CLIB_MARCH_SFX (fn##_ma) (_args); \
  extern rtype (*fn##_selected) (_args); \
  extern int fn##_selected_priority; \
  CLIB_MARCH_FN_CONSTRUCTOR (fn) \
  static rtype CLIB_MARCH_SFX (fn##_ma) (_args)
#endif

/* Fetch the runtime-selected implementation of fn. */
#define CLIB_MARCH_FN_SELECT(fn) (* fn ## _selected)

/* Human-readable CPU information formatters; implemented out of line
 * (not visible in this header). */
format_function_t format_cpu_uarch;
format_function_t format_cpu_model_name;
format_function_t format_cpu_flags;