2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vppinfra/clib.h>
17 #include <vppinfra/format.h>
18 #include <vppinfra/cpu.h>
/* _(family, model, uarch-name, cpu-name)
   family/model are the combined CPUID leaf-1 values (extended model bits
   included); all current entries are family 0x06. */
#define foreach_x86_cpu_uarch \
 _ (0x06, 0x9e, "Kaby Lake", "Kaby Lake DT/H/S/X") \
 _ (0x06, 0x9c, "Tremont", "Jasper Lake") \
 _ (0x06, 0x9a, "Alder Lake", "Alder Lake L") \
 _ (0x06, 0x97, "Alder Lake", "Alder Lake") \
 _ (0x06, 0x96, "Tremont", "Elkhart Lake") \
 _ (0x06, 0x8f, "Sapphire Rapids", "Sapphire Rapids X") \
 _ (0x06, 0x8e, "Kaby Lake", "Kaby Lake Y/U") \
 _ (0x06, 0x8c, "Tiger Lake", "Tiger Lake U") \
 _ (0x06, 0x86, "Tremont", "Jacobsville") \
 _ (0x06, 0x85, "Knights Mill", "Knights Mill") \
 _ (0x06, 0x7e, "Ice Lake", "Ice Lake U") \
 _ (0x06, 0x7d, "Ice Lake", "Ice Lake Y") \
 _ (0x06, 0x7a, "Goldmont Plus", "Gemini Lake") \
 _ (0x06, 0x6c, "Ice Lake", "Ice Lake DE") \
 _ (0x06, 0x6a, "Ice Lake", "Ice Lake SP") \
 _ (0x06, 0x66, "Cannon Lake", "Cannon Lake U") \
 _ (0x06, 0x5f, "Goldmont", "Denverton") \
 _ (0x06, 0x5e, "Skylake", "Skylake DT/H/S") \
 _ (0x06, 0x5c, "Goldmont", "Apollo Lake") \
 _ (0x06, 0x5a, "Silvermont", "Moorefield") \
 _ (0x06, 0x57, "Knights Landing", "Knights Landing") \
 _ (0x06, 0x56, "Broadwell", "Broadwell DE") \
 _ (0x06, 0x55, "Skylake", "Skylake X/SP") \
 _ (0x06, 0x4f, "Broadwell", "Broadwell EP/EX") \
 _ (0x06, 0x4e, "Skylake", "Skylake Y/U") \
 _ (0x06, 0x4d, "Silvermont", "Rangeley") \
 _ (0x06, 0x4c, "Airmont", "Braswell") \
 _ (0x06, 0x47, "Broadwell", "Broadwell H") \
 _ (0x06, 0x46, "Haswell", "Crystalwell") \
 _ (0x06, 0x45, "Haswell", "Haswell ULT") \
 _ (0x06, 0x3f, "Haswell", "Haswell E") \
 _ (0x06, 0x3e, "Ivy Bridge", "Ivy Bridge E/EN/EP") \
 _ (0x06, 0x3d, "Broadwell", "Broadwell U") \
 _ (0x06, 0x3c, "Haswell", "Haswell") \
 _ (0x06, 0x3a, "Ivy Bridge", "IvyBridge") \
 _ (0x06, 0x37, "Silvermont", "BayTrail") \
 _ (0x06, 0x36, "Saltwell", "Cedarview,Centerton") \
 _ (0x06, 0x35, "Saltwell", "Cloverview") \
 _ (0x06, 0x2f, "Westmere", "Westmere EX") \
 _ (0x06, 0x2e, "Nehalem", "Nehalem EX") \
 _ (0x06, 0x2d, "Sandy Bridge", "SandyBridge E/EN/EP") \
 _ (0x06, 0x2c, "Westmere", "Westmere EP/EX,Gulftown") \
 _ (0x06, 0x2a, "Sandy Bridge", "Sandy Bridge") \
 _ (0x06, 0x27, "Saltwell", "Medfield") \
 _ (0x06, 0x26, "Bonnell", "Tunnel Creek") \
 _ (0x06, 0x25, "Westmere", "Arrandale,Clarksdale") \
 _ (0x06, 0x1e, "Nehalem", "Clarksfield,Lynnfield,Jasper Forest") \
 _ (0x06, 0x1d, "Penryn", "Dunnington") \
 _ (0x06, 0x1c, "Bonnell", "Pineview,Silverthorne") \
 _ (0x06, 0x1a, "Nehalem", "Nehalem EP,Bloomfield") \
 _ (0x06, 0x17, "Penryn", "Yorkfield,Wolfdale,Penryn,Harpertown")
/* _(implementer-id, part-id, vendor-name, cpu-name, show CPU pass as string) */
/* The last field selects how the silicon revision ("PASS") is printed by
   format_cpu_uarch below: 1 = letter form ('A' + variant, e.g. "A1"),
   0 = numeric form (e.g. "1.0"). */
#define foreach_aarch64_cpu_uarch \
_(0x41, 0xd03, "ARM", "Cortex-A53", 0) \
_(0x41, 0xd07, "ARM", "Cortex-A57", 0) \
_(0x41, 0xd08, "ARM", "Cortex-A72", 0) \
_(0x41, 0xd09, "ARM", "Cortex-A73", 0) \
_(0x41, 0xd0a, "ARM", "Cortex-A75", 0) \
_(0x41, 0xd0b, "ARM", "Cortex-A76", 0) \
_(0x41, 0xd0c, "ARM", "Neoverse-N1", 0) \
_(0x41, 0xd4a, "ARM", "Neoverse-E1", 0) \
_(0x43, 0x0a1, "Marvell", "THUNDERX CN88XX", 0) \
_(0x43, 0x0a2, "Marvell", "OCTEON TX CN81XX", 0) \
_(0x43, 0x0a3, "Marvell", "OCTEON TX CN83XX", 0) \
_(0x43, 0x0af, "Marvell", "THUNDERX2 CN99XX", 1) \
_(0x43, 0x0b1, "Marvell", "OCTEON TX2 CN98XX", 1) \
_(0x43, 0x0b2, "Marvell", "OCTEON TX2 CN96XX", 1)
91 format_cpu_uarch (u8 * s, va_list * args)
94 u32 __attribute__ ((unused)) eax, ebx, ecx, edx;
95 u8 model, family, stepping;
97 if (__get_cpuid (1, &eax, &ebx, &ecx, &edx) == 0)
98 return format (s, "unknown (missing cpuid)");
100 model = ((eax >> 4) & 0x0f) | ((eax >> 12) & 0xf0);
101 family = (eax >> 8) & 0x0f;
102 stepping = eax & 0x0f;
104 #define _(f,m,a,c) if ((model == m) && (family == f)) return \
105 format(s, "[0x%x] %s ([0x%02x] %s) stepping 0x%x", f, a, m, c, stepping);
106 foreach_x86_cpu_uarch
108 return format (s, "unknown (family 0x%02x model 0x%02x)", family, model);
112 unformat_input_t input;
113 u32 implementer, primary_part_number, variant, revision;
115 fd = open ("/proc/cpuinfo", 0);
117 return format (s, "unknown");
119 unformat_init_clib_file (&input, fd);
120 while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
122 if (unformat (&input, "CPU implementer%_: 0x%x", &implementer))
124 else if (unformat (&input, "CPU part%_: 0x%x", &primary_part_number))
126 else if (unformat (&input, "CPU variant%_: 0x%x", &variant))
128 else if (unformat (&input, "CPU revision%_: %u", &revision))
131 unformat_skip_line (&input);
133 unformat_free (&input);
136 #define _(i,p,a,c,_format) if ((implementer == i) && (primary_part_number == p)){ \
138 return format(s, "%s (%s PASS %c%u)", a, c, 'A'+variant, revision);\
140 if (implementer == 0x43)\
142 return format (s, "%s (%s PASS %u.%u)", a, c, variant, revision);}}
144 foreach_aarch64_cpu_uarch
146 return format (s, "unknown (implementer 0x%02x part 0x%03x PASS %u.%u)",
147 implementer, primary_part_number, variant, revision);
149 #else /* ! __x86_64__ */
150 return format (s, "unknown");
155 format_cpu_model_name (u8 * s, va_list * args)
158 u32 __attribute__ ((unused)) eax, ebx, ecx, edx;
162 if (__get_cpuid (1, &eax, &ebx, &ecx, &edx) == 0)
163 return format (s, "unknown (missing cpuid)");
165 __get_cpuid (0x80000000, &eax, &ebx, &ecx, &edx);
166 if (eax < 0x80000004)
167 return format (s, "unknown (missing ext feature)");
169 vec_validate (name, 48);
170 name_u32 = (u32 *) name;
172 __get_cpuid (0x80000002, &eax, &ebx, &ecx, &edx);
178 __get_cpuid (0x80000003, &eax, &ebx, &ecx, &edx);
184 __get_cpuid (0x80000004, &eax, &ebx, &ecx, &edx);
190 s = format (s, "%s", name);
194 #elif defined(__aarch64__)
195 return format (s, "armv8");
196 #else /* ! __x86_64__ */
197 return format (s, "unknown");
201 #if defined(__x86_64__) || defined(__aarch64__)
/* Strip 'pfx' from the front of 'flag' if present; otherwise return 'flag'
   unchanged.  'len' is sizeof (pfx-literal), i.e. prefix length + 1 for the
   terminating NUL, hence the "- 1" adjustments. */
static inline char const *
flag_skip_prefix (char const *flag, const char *pfx, int len)
{
  if (0 == strncmp (flag, pfx, len - 1))
    return flag + len - 1;
  return flag;
}
212 format_cpu_flags (u8 *s, va_list *args)
214 #if defined(__x86_64__)
215 #define _(flag, func, reg, bit) \
216 if (clib_cpu_supports_##flag ()) \
217 s = format (s, "%s ", flag_skip_prefix (#flag, "x86_", sizeof ("x86_")));
218 foreach_x86_64_flags return s;
220 #elif defined(__aarch64__)
221 #define _(flag, bit) \
222 if (clib_cpu_supports_##flag ()) \
223 s = format (s, "%s ", \
224 flag_skip_prefix (#flag, "aarch64_", sizeof ("aarch64_")));
225 foreach_aarch64_flags return s;
227 #else /* ! ! __x86_64__ && ! __aarch64__ */
228 return format (s, "unknown");
233 clib_get_current_cpu_id ()
236 syscall (__NR_getcpu, &cpu, &node, 0);
241 clib_get_current_numa_node ()
244 syscall (__NR_getcpu, &cpu, &node, 0);
249 format_march_variant (u8 *s, va_list *args)
251 clib_march_variant_type_t t = va_arg (*args, clib_march_variant_type_t);
252 char *variants[] = { [0] = "default",
253 #define _(s, n) [CLIB_MARCH_VARIANT_TYPE_##s] = n,
254 foreach_march_variant
257 return format (s, "%s", variants[t]);
261 * fd.io coding-style-patch-verification: ON
264 * eval: (c-set-style "gnu")