1 /* SPDX-License-Identifier: Apache-2.0
2 * Copyright(c) 2021 Cisco Systems, Inc.
5 #include <vppinfra/format.h>
6 #include <vppinfra/test/test.h>
7 #include <vppinfra/error.h>
/* test_march_supported: report whether the running CPU supports a given
 * multiarch variant.  Returns the clib march priority for the variant
 * (negative apparently meaning "not supported" -- see the `< 0` checks in
 * test_funct/test_perf below).
 * NOTE(review): this chunk is missing lines (return type, the
 * foreach-march-variant macro invocation that expands the body below, and
 * the fallback return) -- confirm against the full file. */
12 test_march_supported (clib_march_variant_type_t type)
/* Expanded once per march variant `s` by an enclosing foreach macro
 * (not visible in this chunk). */
15 if (CLIB_MARCH_VARIANT_TYPE_##s == type) \
16 return clib_cpu_march_priority_##s ();
/* test_funct: run the registered functional tests once per supported
 * multiarch variant, printing a PASS/FAIL line per test.
 * NOTE(review): many interior lines (braces, the loop over the per-variant
 * registration list, the call that produces `err`, returns) are missing
 * from this chunk -- the comments below describe only what is visible. */
23 test_funct (test_main_t *tm)
/* Walk every multiarch variant known to clib. */
25 for (int i = 0; i < CLIB_MARCH_TYPE_N_VARIANTS; i++)
27 test_registration_t *r = tm->registrations[i];
/* Skip variants with no registered tests or not runnable on this CPU. */
29 if (r == 0 || test_march_supported (i) < 0)
32 fformat (stdout, "\nMultiarch Variant: %U\n", format_march_variant, i);
34 "-------------------------------------------------------\n");
/* Optional substring filter on the test name (set from the command line). */
38 if (tm->filter && strstr (r->name, (char *) tm->filter) == 0)
/* `err` is presumably the return of the test's function pointer (call not
 * visible in this chunk) -- nonzero means FAIL. */
41 fformat (stdout, "%-50s %s\n", r->name, err ? "FAIL" : "PASS");
/* Release any buffers the test body allocated via the test_mem_alloc*
 * helpers; they registered themselves in tm->allocated_mem. */
42 for (int i = 0; i < vec_len (tm->allocated_mem); i++)
43 clib_mem_free (tm->allocated_mem[i]);
44 vec_free (tm->allocated_mem);
47 clib_error_report (err);
48 fformat (stdout, "\n");
55 fformat (stdout, "\n");
/* format_test_perf_bundle_core_power: clib format callback for the
 * "core-power" perf bundle.  Formats either column headers ("Level N") or,
 * when counter data is present, each turbo-license-level count as a
 * percentage of data[0] (the reference cycle count).
 * NOTE(review): the conditionals selecting header vs. data output are
 * missing from this chunk -- presumably it branches on whether `data` is
 * null; confirm against the full file. */
61 format_test_perf_bundle_core_power (u8 *s, va_list *args)
/* Bundle and per-test context are consumed but unused by this formatter. */
63 test_perf_event_bundle_t __clib_unused *b =
64 va_arg (*args, test_perf_event_bundle_t *);
65 test_perf_t __clib_unused *tp = va_arg (*args, test_perf_t *);
66 u64 *data = va_arg (*args, u64 *);
/* data[1]/data[0]: fraction of cycles at license level 0. */
69 s = format (s, "%7.1f %%", (f64) 100 * data[1] / data[0]);
71 s = format (s, "%9s", "Level 0");
/* data[2]/data[0]: fraction of cycles at license level 1. */
74 s = format (s, "%8.1f %%", (f64) 100 * data[2] / data[0]);
76 s = format (s, "%9s", "Level 1");
/* data[3]/data[0]: fraction of cycles at license level 2. */
79 s = format (s, "%7.1f %%", (f64) 100 * data[3] / data[0]);
81 s = format (s, "%9s", "Level 2");
/* Encode an Intel raw PMU event selector: event code in bits 7:0, unit mask
 * in bits 15:8 (the PERF_TYPE_RAW config layout used by perf_event_open). */
87 #define PERF_INTEL_CODE(event, umask) ((event) | (umask) << 8)
/* Static registration of the "core-power" perf event bundle: one reference
 * cycle counter plus the turbo-license-level cycle counters, rendered by
 * format_test_perf_bundle_core_power above.
 * NOTE(review): the registration struct's opening lines (name, macro) are
 * missing from this chunk. */
92 "Core cycles where the core was running under specific turbo schedule.",
93 .type = PERF_TYPE_RAW,
/* 0x3c/0x00: presumably CPU_CLK_UNHALTED.THREAD_P (reference count);
 * 0x28/*: presumably CORE_POWER.LVLx_TURBO_LICENSE -- confirm against the
 * Intel SDM event tables. */
94 .config[0] = PERF_INTEL_CODE (0x3c, 0x00),
95 .config[1] = PERF_INTEL_CODE (0x28, 0x07),
96 .config[2] = PERF_INTEL_CODE (0x28, 0x18),
97 .config[3] = PERF_INTEL_CODE (0x28, 0x20),
98 .config[4] = PERF_INTEL_CODE (0x28, 0x40),
100 .format_fn = format_test_perf_bundle_core_power,
/* test_perf: run the registered performance tests under a clib perfmon
 * counter bundle, once per supported multiarch variant, repeating each test
 * tm->repeat times and printing the captured counters.
 * NOTE(review): interior lines (braces, the per-test loop over
 * r->perf_tests, the call into the test body, error/cleanup paths) are
 * missing from this chunk -- the comments describe only what is visible. */
108 test_perf (test_main_t *tm)
110 clib_error_t *err = 0;
111 clib_perfmon_ctx_t _ctx, *ctx = &_ctx;
/* Open the counter bundle chosen on the command line (default "default"). */
113 if ((err = clib_perfmon_init_by_bundle_name (
114 ctx, "%s", tm->bundle ? (char *) tm->bundle : "default")))
117 fformat (stdout, "Warming up...\n");
118 clib_perfmon_warmup (ctx);
/* Same variant iteration/filtering pattern as test_funct. */
120 for (int i = 0; i < CLIB_MARCH_TYPE_N_VARIANTS; i++)
122 test_registration_t *r = tm->registrations[i];
124 if (r == 0 || test_march_supported (i) < 0)
127 fformat (stdout, "\nMultiarch Variant: %U\n", format_march_variant, i);
129 "-------------------------------------------------------\n");
134 test_perf_t *pt = r->perf_tests;
135 if (tm->filter && strstr (r->name, (char *) tm->filter) == 0)
/* Group the captures under this registration's name. */
138 clib_perfmon_capture_group (ctx, "%s", r->name);
/* Repeat each perf test tm->repeat times for stable numbers. */
141 for (int i = 0; i < tm->repeat; i++)
143 pt->fd = ctx->group_fd;
144 clib_perfmon_reset (ctx);
/* Capture counters for this run, normalized by the test's op count. */
146 clib_perfmon_capture (ctx, pt->n_ops, "%0s", pt->name);
/* Free buffers the test body allocated via the test_mem_alloc* helpers. */
147 for (int i = 0; i < vec_len (tm->allocated_mem); i++)
148 clib_mem_free (tm->allocated_mem[i]);
149 vec_free (tm->allocated_mem);
157 fformat (stdout, "%U\n", format_perfmon_bundle, ctx);
158 clib_perfmon_clear (ctx);
161 clib_perfmon_free (ctx);
/* main: initialize the clib heap, parse command-line options, then run
 * either the perf tests ("perf" keyword) or the functional tests.
 * Recognized options: perf | filter <substr> | bundle <name> | repeat <n>.
 * NOTE(review): the branch selecting perf vs. funct, the `err` declaration,
 * and the exit-status return are missing from this chunk. */
167 main (int argc, char *argv[])
169 test_main_t *tm = &test_main;
170 unformat_input_t _i = {}, *i = &_i;
/* 64 MB clib heap for the test run. */
171 clib_mem_init (0, 64ULL << 20);
178 unformat_init_command_line (i, argv);
180 while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
182 if (unformat (i, "perf"))
184 else if (unformat (i, "filter %s", &tm->filter))
186 else if (unformat (i, "bundle %s", &tm->bundle))
188 else if (unformat (i, "repeat %d", &tm->repeat))
/* Any unrecognized token is reported; presumably followed by an early
 * exit not visible here. */
192 clib_warning ("unknown input '%U'", format_unformat_error, i);
198 err = test_perf (tm);
200 err = test_funct (tm);
204 clib_error_report (err);
205 fformat (stderr, "\n");
/* test_mem_alloc: allocate a zero-filled, cache-line-aligned buffer of at
 * least `size` bytes and register it in test_main.allocated_mem so the test
 * runners (test_funct/test_perf) free it after the test body returns.
 * NOTE(review): the return type, `rv` declaration, and the return statement
 * are missing from this chunk. */
212 test_mem_alloc (uword size)
/* Round up so the allocation covers whole cache lines. */
215 size = round_pow2 (size, CLIB_CACHE_LINE_BYTES);
216 rv = clib_mem_alloc_aligned (size, CLIB_CACHE_LINE_BYTES);
217 clib_memset_u8 (rv, 0, size);
/* Ownership transfers to test_main; callers must not free the buffer. */
218 vec_add1 (test_main.allocated_mem, rv);
/* test_mem_alloc_and_fill_inc_u8: allocate a cache-line-aligned buffer and
 * fill it with an incrementing byte pattern: rv[i] = ((u8)i + start) & mask.
 * A zero mask is treated as 0xff (i.e. no masking).  The buffer is tracked
 * in test_main.allocated_mem for automatic cleanup by the test runners.
 * NOTE(review): the return type, `rv` declaration, and return statement are
 * missing from this chunk. */
223 test_mem_alloc_and_fill_inc_u8 (uword size, u8 start, u8 mask)
226 mask = mask ? mask : 0xff;
227 size = round_pow2 (size, CLIB_CACHE_LINE_BYTES);
228 rv = clib_mem_alloc_aligned (size, CLIB_CACHE_LINE_BYTES);
229 for (uword i = 0; i < size; i++)
230 rv[i] = ((u8) i + start) & mask;
/* Ownership transfers to test_main; freed after the test run. */
231 vec_add1 (test_main.allocated_mem, rv);
/* test_mem_alloc_and_splat: allocate a cache-line-aligned buffer holding
 * n_elts copies of the elt_size-byte element pointed to by `elt`, zeroing
 * any alignment padding past the replicated data.  The buffer is tracked in
 * test_main.allocated_mem for automatic cleanup by the test runners.
 * NOTE(review): the return type, `rv`/`e` declarations, the `e` increment
 * inside the loop, and the return statement are missing from this chunk. */
236 test_mem_alloc_and_splat (uword elt_size, uword n_elts, void *elt)
239 uword data_size = elt_size * n_elts;
240 uword alloc_size = round_pow2 (data_size, CLIB_CACHE_LINE_BYTES);
241 e = rv = clib_mem_alloc_aligned (alloc_size, CLIB_CACHE_LINE_BYTES);
/* Copy the template element repeatedly until data_size bytes are filled;
 * `e` presumably advances by elt_size each iteration (advance not visible
 * in this chunk). */
242 while (e - rv < data_size)
244 clib_memcpy_fast (e, elt, elt_size);
/* Zero the tail padding introduced by cache-line rounding. */
248 if (data_size < alloc_size)
249 clib_memset_u8 (e, 0, alloc_size - data_size);
/* Ownership transfers to test_main; freed after the test run. */
250 vec_add1 (test_main.allocated_mem, rv);